Dataset columns (string-length ranges as reported by the viewer):

  repo_name : string, length 6 to 103
  path      : string, length 4 to 209
  copies    : string, length 1 to 4
  size      : string, length 4 to 7
  content   : string, length 838 to 1.04M
  license   : string, 15 distinct values
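Each record below is one row of this table: a source repository, a file path within it, a duplicate count, the file size in bytes, the full file content, and its license. A minimal sketch for iterating over such a dump, assuming it has been exported to a JSON-lines file; the file name is a placeholder and not part of the original listing:

```python
import json

# Hypothetical path; the listing above does not name the underlying file.
DATA_PATH = "code_corpus.jsonl"

with open(DATA_PATH, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # Each record carries the six columns described above.
        print(row["repo_name"], row["path"], row["license"], len(row["content"]))
```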
open-mmlab/mmdetection
mmdet/models/dense_heads/ssd_head.py
1
14790
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, multi_apply) from ..builder import HEADS from ..losses import smooth_l1_loss from .anchor_head import AnchorHead # TODO: add loss evaluator for SSD @HEADS.register_module() class SSDHead(AnchorHead): """SSD head used in https://arxiv.org/abs/1512.02325. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Default: 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Default: False. conv_cfg (dict): Dictionary to construct and config conv layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: None. act_cfg (dict): Dictionary to construct and config activation layer. Default: None. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes=80, in_channels=(512, 1024, 512, 256, 256, 256), stacked_convs=0, feat_channels=256, use_depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[8, 16, 32, 64, 100, 300], ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), basesize_ratio_range=(0.1, 0.9)), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], ), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform', bias=0)): super(AnchorHead, self).__init__(init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.stacked_convs = stacked_convs self.feat_channels = feat_channels self.use_depthwise = use_depthwise self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.cls_out_channels = num_classes + 1 # add background class self.prior_generator = build_prior_generator(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. 
So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors self._init_layers() self.bbox_coder = build_bbox_coder(bbox_coder) self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = False self.cls_focal_loss = False self.train_cfg = train_cfg self.test_cfg = test_cfg # set sampling=False for archor_target self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False @property def num_anchors(self): """ Returns: list[int]: Number of base_anchors on each point of each level. """ warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors def _init_layers(self): """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() # TODO: Use registry to choose ConvModule type conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule for channel, num_base_priors in zip(self.in_channels, self.num_base_priors): cls_layers = [] reg_layers = [] in_channel = channel # build stacked conv tower, not used in default ssd for i in range(self.stacked_convs): cls_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) in_channel = self.feat_channels # SSD-Lite head if self.use_depthwise: cls_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) cls_layers.append( nn.Conv2d( in_channel, num_base_priors * self.cls_out_channels, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) reg_layers.append( nn.Conv2d( in_channel, num_base_priors * 4, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) self.cls_convs.append(nn.Sequential(*cls_layers)) self.reg_convs.append(nn.Sequential(*reg_layers)) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, self.cls_convs): cls_scores.append(cls_conv(feat)) bbox_preds.append(reg_conv(feat)) return cls_scores, bbox_preds def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Compute loss of a single image. Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). 
labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). num_total_samples (int): If sampling, num total samples equal to the number of total anchors; Otherwise, it is the number of positive anchors. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_cls_all = F.cross_entropy( cls_score, labels, reduction='none') * label_weights # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) loss_bbox = smooth_l1_loss( bbox_pred, bbox_targets, bbox_weights, beta=self.train_cfg.smoothl1_beta, avg_factor=num_total_samples) return loss_cls[None], loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
apache-2.0
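The `loss_single` method in the SSD head above implements hard negative mining: every positive anchor contributes to the classification loss, but only the `neg_pos_ratio` hardest negatives, ranked by per-anchor cross-entropy, are kept. A minimal standalone sketch of that selection step, independent of mmdetection and with illustrative tensor names:

```python
import torch
import torch.nn.functional as F

def hard_negative_mine(cls_score, labels, num_classes, neg_pos_ratio=3):
    """Sum classification loss over positives and the hardest negatives only."""
    loss_all = F.cross_entropy(cls_score, labels, reduction='none')
    # Foreground labels are in [0, num_classes); background uses id num_classes.
    pos_inds = ((labels >= 0) & (labels < num_classes)).nonzero(as_tuple=False).reshape(-1)
    neg_inds = (labels == num_classes).nonzero(as_tuple=False).reshape(-1)
    # Cap negatives at neg_pos_ratio times the number of positives.
    num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
    topk_neg_loss, _ = loss_all[neg_inds].topk(num_neg)
    return loss_all[pos_inds].sum() + topk_neg_loss.sum()
```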
Tong-Chen/scikit-learn
sklearn/decomposition/pca.py
1
25784
""" Principal Component Analysis """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis A. Engemann <d.engemann@fz-juelich.de> # # License: BSD 3 clause from math import log, sqrt import warnings import numpy as np from scipy import linalg from scipy import sparse from scipy.special import gammaln from ..base import BaseEstimator, TransformerMixin from ..utils import array2d, check_random_state, as_float_array from ..utils import atleast2d_or_csr from ..utils import deprecated from ..utils.sparsefuncs import mean_variance_axis0 from ..utils.extmath import (fast_logdet, safe_sparse_dot, randomized_svd, fast_dot) def _assess_dimension_(spectrum, rank, n_samples, n_features): """Compute the likelihood of a rank ``rank`` dataset The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. Parameters ---------- spectrum: array of shape (n) data spectrum rank: int, tested rank value n_samples: int, number of samples dim: int, embedding/empirical dimension Returns ------- ll: float, The log-likelihood Notes ----- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` """ if rank > len(spectrum): raise ValueError("The tested rank cannot exceed the rank of the" " dataset") pu = -rank * log(2.) for i in range(rank): pu += (gammaln((n_features - i) / 2.) - log(np.pi) * (n_features - i) / 2.) pl = np.sum(np.log(spectrum[:rank])) pl = -pl * n_samples / 2. if rank == n_features: pv = 0 v = 1 else: v = np.sum(spectrum[rank:]) / (n_features - rank) pv = -np.log(v) * n_samples * (n_features - rank) / 2. m = n_features * rank - rank * (rank + 1.) / 2. pp = log(2. * np.pi) * (m + rank + 1.) / 2. pa = 0. spectrum_ = spectrum.copy() spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, len(spectrum)): pa += log((spectrum[i] - spectrum[j]) * (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples) ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2. return ll def _infer_dimension_(spectrum, n_samples, n_features): """Infers the dimension of a dataset of shape (n_samples, n_features) The dataset is described by its spectrum `spectrum`. """ n_spectrum = len(spectrum) ll = np.empty(n_spectrum) for rank in range(n_spectrum): ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features) return ll.argmax() class PCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) Linear dimensionality reduction using Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. This implementation uses the scipy.linalg implementation of the singular value decomposition. It only works for dense arrays and is not scalable to large dimensional data. The time complexity of this implementation is ``O(n ** 3)`` assuming n ~ n_samples ~ n_features. Parameters ---------- n_components : int, None or string Number of components to keep. 
if n_components is not set all components are kept:: n_components == min(n_samples, n_features) if n_components == 'mle', Minka\'s MLE is used to guess the dimension if ``0 < n_components < 1``, select the number of components such that the amount of variance that needs to be explained is greater than the percentage specified by n_components copy : bool If False, data passed to fit are overwritten and running fit(X).transform(X) will not yield the expected results, use fit_transform(X) instead. whiten : bool, optional When True (False by default) the `components_` vectors are divided by n_samples times singular values to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometime improve the predictive accuracy of the downstream estimators by making there data respect some hard-wired assumptions. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. \ k is not set then all components are stored and the sum of explained \ variances is equal to 1.0 `n_components_` : int The estimated number of components. Relevant when n_components is set to 'mle' or a number between 0 and 1 to select using explained variance. `noise_variance_` : float The estimated noise covariance following the Probabilistic PCA model from Tipping and Bishop 1999. See "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf. It is required to computed the estimated data covariance and score samples. Notes ----- For n_components='mle', this class uses the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` Implements the probabilistic PCA model from: M. Tipping and C. Bishop, Probabilistic Principal Component Analysis, Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622 via the score and score_samples methods. See http://www.miketipping.com/papers/met-mppca.pdf Due to implementation subtleties of the Singular Value Decomposition (SVD), which is used in this implementation, running fit twice on the same matrix can lead to principal components with signs flipped (change in direction). For this reason, it is important to always use the same estimator object to transform data in a consistent fashion. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import PCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = PCA(n_components=2) >>> pca.fit(X) PCA(copy=True, n_components=2, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- ProbabilisticPCA RandomizedPCA KernelPCA SparsePCA TruncatedSVD """ def __init__(self, n_components=None, copy=True, whiten=False): self.n_components = n_components self.copy = copy self.whiten = whiten def fit(self, X, y=None): """Fit the model with X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ self._fit(X) return self def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ U, S, V = self._fit(X) U = U[:, :self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0]) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[:self.n_components_] return U def _fit(self, X): """Fit the model on X Parameters ---------- X: array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- U, s, V : ndarrays The SVD of the input data, copied and centered when requested. """ X = array2d(X) n_samples, n_features = X.shape X = as_float_array(X, copy=self.copy) # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ U, S, V = linalg.svd(X, full_matrices=False) explained_variance_ = (S ** 2) / n_samples explained_variance_ratio_ = (explained_variance_ / explained_variance_.sum()) if self.whiten: components_ = V / (S[:, np.newaxis] / sqrt(n_samples)) else: components_ = V n_components = self.n_components if n_components is None: n_components = n_features elif n_components == 'mle': if n_samples < n_features: raise ValueError("n_components='mle' is only supported " "if n_samples >= n_features") n_components = _infer_dimension_(explained_variance_, n_samples, n_features) if 0 < n_components < 1.0: # number of components for which the cumulated explained variance # percentage is superior to the desired threshold ratio_cumsum = explained_variance_ratio_.cumsum() n_components = np.sum(ratio_cumsum < n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < n_features: self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. # store n_samples to revert whitening when getting covariance self.n_samples_ = n_samples self.components_ = components_[:n_components] self.explained_variance_ = explained_variance_[:n_components] explained_variance_ratio_ = explained_variance_ratio_[:n_components] self.explained_variance_ratio_ = explained_variance_ratio_ self.n_components_ = n_components return (U, S, V) def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances. Returns ------- cov : array, shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. 
""" n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[::len(precision) + 1] += 1. / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision def transform(self, X): """Apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = array2d(X) if self.mean_ is not None: X = X - self.mean_ X_transformed = fast_dot(X, self.components_.T) return X_transformed def inverse_transform(self, X): """Transform data back to its original space, i.e., return an input X_original whose transform would be X Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation as transform. """ return fast_dot(X, self.components_) + self.mean_ def score_samples(self, X): """Return the log-likelihood of each sample See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X: array, shape(n_samples, n_features) The data. Returns ------- ll: array, shape (n_samples,) Log-likelihood of each sample under the current model """ Xr = X - self.mean_ n_features = X.shape[1] log_like = np.zeros(X.shape[0]) precision = self.get_precision() log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1) log_like -= .5 * (n_features * log(2. * np.pi) - fast_logdet(precision)) return log_like def score(self, X, y=None): """Return the average log-likelihood of all samples See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X: array, shape(n_samples, n_features) The data. Returns ------- ll: float Average log-likelihood of the samples under the current model """ return np.mean(self.score_samples(X)) @deprecated("ProbabilisticPCA will be removed in 0.16. WARNING: the covariance" " estimation was previously incorrect, your output might be different " " than under the previous versions. Use PCA that implements score" " and score_samples. 
To work with homoscedastic=False, you should use" " FactorAnalysis.") class ProbabilisticPCA(PCA): """Additional layer on top of PCA that adds a probabilistic evaluation""" __doc__ += PCA.__doc__ def fit(self, X, y=None, homoscedastic=True): """Additionally to PCA.fit, learns a covariance model Parameters ---------- X : array of shape(n_samples, n_features) The data to fit homoscedastic : bool, optional, If True, average variance across remaining dimensions """ PCA.fit(self, X) n_samples, n_features = X.shape n_components = self.n_components if n_components is None: n_components = n_features explained_variance = self.explained_variance_.copy() if homoscedastic: explained_variance -= self.noise_variance_ # Make the low rank part of the estimated covariance self.covariance_ = np.dot(self.components_[:n_components].T * explained_variance, self.components_[:n_components]) if n_features == n_components: delta = 0. elif homoscedastic: delta = self.noise_variance_ else: Xr = X - self.mean_ Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_) delta = (Xr ** 2).mean(axis=0) / (n_features - n_components) # Add delta to the diagonal without extra allocation self.covariance_.flat[::n_features + 1] += delta return self def score(self, X, y=None): """Return a score associated to new data Parameters ---------- X: array of shape(n_samples, n_features) The data to test Returns ------- ll: array of shape (n_samples), log-likelihood of each row of X under the current model """ Xr = X - self.mean_ n_features = X.shape[1] log_like = np.zeros(X.shape[0]) self.precision_ = linalg.inv(self.covariance_) log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1) log_like -= .5 * (fast_logdet(self.covariance_) + n_features * log(2. * np.pi)) return log_like class RandomizedPCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) using randomized SVD Linear dimensionality reduction using approximated Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. Parameters ---------- n_components : int, optional Maximum number of components to keep. When not given or None, this is set to n_features (the second dimension of the training data). copy : bool If False, data passed to fit are overwritten and running fit(X).transform(X) will not yield the expected results, use fit_transform(X) instead. iterated_power : int, optional Number of iterations for the power method. 3 by default. whiten : bool, optional When True (False by default) the `components_` vectors are divided by the singular values to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometime improve the predictive accuracy of the downstream estimators by making their data respect some hard-wired assumptions. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. 
\ k is not set then all components are stored and the sum of explained \ variances is equal to 1.0 Examples -------- >>> import numpy as np >>> from sklearn.decomposition import RandomizedPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = RandomizedPCA(n_components=2) >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE RandomizedPCA(copy=True, iterated_power=3, n_components=2, random_state=None, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- PCA ProbabilisticPCA TruncatedSVD References ---------- .. [Halko2009] `Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909)` .. [MRT] `A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert` Notes ----- This class supports sparse matrix input for backward compatibility, but actually computes a truncated SVD instead of a PCA in that case (i.e. no centering is performed). This support is deprecated; use the class TruncatedSVD for sparse matrix support. """ def __init__(self, n_components=None, copy=True, iterated_power=3, whiten=False, random_state=None): self.n_components = n_components self.copy = copy self.iterated_power = iterated_power self.whiten = whiten self.mean_ = None self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ self._fit(X) return self def _fit(self, X): """Fit the model to the data X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- X : ndarray, shape (n_samples, n_features) The input data, copied, centered and whitened when requested. """ random_state = check_random_state(self.random_state) if sparse.issparse(X): warnings.warn("Sparse matrix support is deprecated" " and will be dropped in 0.16." " Use TruncatedSVD instead.", DeprecationWarning) else: # not a sparse matrix, ensure this is a 2D array X = np.atleast_2d(as_float_array(X, copy=self.copy)) n_samples = X.shape[0] if not sparse.issparse(X): # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components U, S, V = randomized_svd(X, n_components, n_iter=self.iterated_power, random_state=random_state) self.explained_variance_ = exp_var = (S ** 2) / n_samples if sparse.issparse(X): _, full_var = mean_variance_axis0(X) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var if self.whiten: self.components_ = V / S[:, np.newaxis] * sqrt(n_samples) else: self.components_ = V return X def transform(self, X, y=None): """Apply dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. 
Returns ------- X_new : array-like, shape (n_samples, n_components) """ # XXX remove scipy.sparse support here in 0.16 X = atleast2d_or_csr(X) if self.mean_ is not None: X = X - self.mean_ X = safe_sparse_dot(X, self.components_.T) return X def fit_transform(self, X, y=None): """Apply dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = self._fit(atleast2d_or_csr(X)) X = safe_sparse_dot(X, self.components_.T) return X def inverse_transform(self, X, y=None): """Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform. """ # XXX remove scipy.sparse support here in 0.16 X_original = safe_sparse_dot(X, self.components_) if self.mean_ is not None: X_original = X_original + self.mean_ return X_original
bsd-3-clause
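In the `PCA._fit` method above, a fractional `n_components` is resolved by thresholding the cumulative explained-variance ratio computed from the singular values. The same rule, pulled out as a small standalone helper; `S` is assumed to be the singular values of the centered data, as in the code above:

```python
import numpy as np

def n_components_for_variance(S, n_samples, target_ratio):
    """Smallest k whose components explain at least `target_ratio` of the variance."""
    explained_variance = (S ** 2) / n_samples
    ratio_cumsum = np.cumsum(explained_variance / explained_variance.sum())
    # Count components whose cumulative ratio is still below the target, then add one.
    return int(np.sum(ratio_cumsum < target_ratio) + 1)
```

For example, `n_components_for_variance(S, X.shape[0], 0.95)` would return the number of components needed to explain at least 95% of the variance.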
djgagne/scikit-learn
sklearn/linear_model/tests/test_least_angle.py
98
20870
from nose.tools import assert_equal import numpy as np from scipy import linalg from sklearn.cross_validation import train_test_split from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_no_warnings, assert_warns from sklearn.utils.testing import TempMemmap from sklearn.utils import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target # TODO: use another dataset that has multiple drops def test_simple(): # Principle of Lars is to keep covariances tied and decreasing # also test verbose output from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() alphas_, active, coef_path_ = linear_model.lars_path( diabetes.data, diabetes.target, method="lar", verbose=10) sys.stdout = old_stdout for (i, coef_) in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert_true(ocur == i + 1) else: # no more than max_pred variables can go into the active set assert_true(ocur == X.shape[1]) finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix G = np.dot(diabetes.data.T, diabetes.data) alphas_, active, coef_path_ = linear_model.lars_path( diabetes.data, diabetes.target, Gram=G, method="lar") for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert_true(ocur == i + 1) else: # no more than max_pred variables can go into the active set assert_true(ocur == X.shape[1]) def test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy gives the right answer X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) def test_lars_lstsq(): # Test that Lars gives least square solution at the end # of the path X1 = 3 * diabetes.data # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) clf.fit(X1, y) coef_lstsq = np.linalg.lstsq(X1, y)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution at the end # of the path alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso") coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust to collinearity in input X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0]) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert_true(not np.isnan(coef_path_).any()) residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) 
# just make sure it's bounded n_samples = 10 X = np.random.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns the correct output alphas_, active_, coef_path_ = linear_model.lars_path( diabetes.data, diabetes.target, method="lar") alpha_, active, coef = linear_model.lars_path( diabetes.data, diabetes.target, method="lar", return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert_true(alpha_ == alphas_[-1]) def test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains correct G = np.dot(diabetes.data.T, diabetes.data) alphas_, active_, coef_path_ = linear_model.lars_path( diabetes.data, diabetes.target, method="lar", Gram=G) alpha_, active, coef = linear_model.lars_path( diabetes.data, diabetes.target, method="lar", Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert_true(alpha_ == alphas_[-1]) def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and Xy remains # correct X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, active_, coef_path_ = linear_model.lars_path( X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9) print("---") alpha_, active, coef = linear_model.lars_path( X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert_true(alpha_ == alphas_[-1]) def test_singular_matrix(): # Test when input is a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) alphas, active, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks that LARS Lasso is handling rank # deficient input data (with n_features < rank) in the same way # as coordinate descent Lasso y = [5, 0, 5] for X in ([[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]], ): # To be able to use the coefs to compute the objective function, # we need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(verbose=False): # Test that LassoLars and Lasso using coordinate descent give the # same results. 
X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False): # Test that LassoLars and Lasso using coordinate descent give the # same results when early stopping is used. # (test : before, in the middle, and in the last part of the path) alphas_min = [10, 0.9, 1e-4] for alphas_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=0.9) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) alphas_min = [10, 0.9, 1e-4] # same test, with normalization for alphas_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=0.9) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing assert_true(np.all(np.diff(lasso.alphas_) < 0)) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design, and check that # it does not blow up, and stays somewhat close to a solution given # by the coordinate descent solver # Also test that lasso_path (using lars_path output style) gives # the same result as lars_path and previous lasso output style # under these conditions. 
rng = np.random.RandomState(42) # Generate data n, m = 70, 100 k = 5 X = rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS has to go # far in the path to converge, and check that LARS and coordinate # descent give the same answers # Note it used to be the case that Lars had to use the drop for good # strategy for this but this is no longer the case with the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y = [10, 10, 1] alpha = .0001 def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure that at least some features get added if necessary # test for 6d2b4c # Hilbert matrix n = 5 H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert_true(np.all(np.isfinite(clf.coef_))) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length 6 + 1 in a Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) def test_multitarget(): # Assure that estimators receiving multidimensional y do the right thing X = diabetes.data Y = np.vstack([diabetes.target, diabetes.target ** 2]).T n_targets = Y.shape[1] for estimator in (linear_model.LassoLars(), linear_model.Lars()): estimator.fit(X, Y) Y_pred = estimator.predict(X) Y_dec = estimator.decision_function(X) assert_array_almost_equal(Y_pred, Y_dec) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) def test_lars_cv(): # Test the LassoLarsCV object by checking that the optimal alpha # increases as the number of samples increases. # This property is not actually garantied in general and is just a # property of the given dataset, with the given steps chosen. old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. 
# - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data y = diabetes.target X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_no_warning_for_zero_mse(): # LassoLarsIC should not warn for log of zero MSE. y = np.arange(10, dtype=float) X = y.reshape(-1, 1) lars = linear_model.LassoLarsIC(normalize=False) assert_no_warnings(lars.fit, X, y) assert_true(np.any(np.isinf(lars.criterion_))) def test_lars_path_readonly_data(): # When using automated memory mapping on large input, the # fold data is in read-only mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) def test_lars_path_positive_constraint(): # this is the main test for the positive parameter on the lars_path method # the estimator classes just make use of this function # we do the test on the diabetes dataset # ensure that we get negative coefficients when positive=False # and all positive when positive=True # for method 'lar' (default) and lasso for method in ['lar', 'lasso']: alpha, active, coefs = \ linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method=method, positive=False) assert_true(coefs.min() < 0) alpha, active, coefs = \ linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method=method, positive=True) assert_true(coefs.min() >= 0) # now we gonna test the positive option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5}, 'LassoLars': {'alpha': 0.1}, 'LarsCV': {}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of all estimator # classes in this same function here for estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(diabetes['data'], diabetes['target']) assert_true(estimator.coef_.min() < 0) estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(diabetes['data'], diabetes['target']) assert_true(min(estimator.coef_) >= 0) def test_lasso_lars_vs_lasso_cd_positive(verbose=False): # Test that LassoLars and Lasso using coordinate descent give the # same results when using the positive option # This test is basically a copy of the above with additional positive # option. However for the middle part, the comparison of coefficient values # for a range of alphas, we had to make an adaptations. See below. 
# not normalized data X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen for coefficient comparison here is restricted # as compared with the above test without the positive option. This is due # to the circumstance that the Lars-Lasso algorithm does not converge to # the least-squares-solution for small alphas, see 'Least Angle Regression' # by Efron et al 2004. The coefficients are typically in congruence up to # the smallest alpha reached by the Lars-Lasso algorithm and start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01)
bsd-3-clause
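`test_simple` above checks the defining LARS property on the diabetes data: at step i of the path, the absolute covariances between the residual and the active features are tied at the maximum, and the number of tied features grows by one per step until every feature is active. A compact reproduction of that check, assuming a recent scikit-learn where `lars_path` is importable from `sklearn.linear_model`:

```python
import numpy as np
from sklearn import datasets
from sklearn.linear_model import lars_path

X, y = datasets.load_diabetes(return_X_y=True)
alphas, active, coef_path = lars_path(X, y, method="lar")

eps = 1e-3
for i, coef in enumerate(coef_path.T):
    cov = X.T @ (y - X @ coef)      # covariance of the residual with each feature
    C = np.max(np.abs(cov))         # current maximum absolute covariance
    n_tied = int(np.sum(np.abs(cov) > C - eps))
    print("step {}: {} features tied at the maximum covariance".format(i, n_tied))
```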
fyffyt/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
248
1095
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-', label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-', label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), 'm-', label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-', label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-', label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), 'y--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
thilbern/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
248
1095
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-', label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-', label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), 'm-', label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-', label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-', label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), 'y--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
emrecelikten/hmm-visualization
hmm-test.py
2
2009
__author__ = 'emre'

import unittest
from hmm import *
import sklearn.hmm
import random


def generate_observations(num_classes):
    size = random.randint(5, 30)
    observations = np.empty(size).astype(int)
    for t in xrange(size):
        observations[t] = random.randint(0, num_classes - 1)
    return observations


class MyTest(unittest.TestCase):
    def test(self):
        states = [0, 1, 2]
        initial_probabilities = np.array([0.5, 0.3, 0.2])
        transition_probabilities = np.array([
            [0.7, 0.2, 0.1],
            [0.2, 0.6, 0.2],
            [0.1, 0.4, 0.5]
        ])
        observation_probabilities = np.array([
            [0.6, 0.2, 0.1, 0.05, 0.05, 0.0],
            [0.05, 0.1, 0.2, 0.6, 0.05, 0.0],
            [0.0, 0.0, 0.1, 0.1, 0.2, 0.6],
        ])

        model = sklearn.hmm.MultinomialHMM(n_components=len(states))
        model._set_startprob(initial_probabilities)
        model._set_transmat(transition_probabilities)
        model._set_emissionprob(observation_probabilities)

        hmm = HMM(initial_probabilities, transition_probabilities,
                  observation_probabilities)

        # Run 10000 test cases with random observations
        for test_num in xrange(10000):
            observations = generate_observations(6)
            sklearn_probability, sklearn_states = model.decode(observations,
                                                               algorithm='viterbi')
            sklearn_probability = np.exp(sklearn_probability)
            own_states, probability, delta, phi = hmm.decode(observations)

            # print(own_states)
            self.assertEqual(sklearn_states.all(), own_states.all(),
                             'States do not match:\n%s\n%s\nDelta:\n%s\nPhi:\n%s' % (
                                 sklearn_states, own_states, delta, phi))
            self.assertAlmostEqual(sklearn_probability, probability,
                                   msg='Probability does not match: %g != %g' %
                                       (sklearn_probability, probability),
                                   delta=1e-6)
mit
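The test above validates a hand-written Viterbi decoder against `sklearn.hmm`, a module that was removed from scikit-learn long ago. For reference, a self-contained NumPy Viterbi over the same three-state model defined in the test; the function name and return convention are illustrative, not taken from the repository:

```python
import numpy as np

def viterbi(obs, start_p, trans_p, emit_p):
    """Most likely state sequence and its probability for a discrete-emission HMM."""
    n_states, T = len(start_p), len(obs)
    delta = np.zeros((T, n_states))           # best path probability ending in each state
    phi = np.zeros((T, n_states), dtype=int)  # backpointers to the best previous state
    delta[0] = start_p * emit_p[:, obs[0]]
    for t in range(1, T):
        scores = delta[t - 1][:, None] * trans_p   # scores[i, j]: from state i to state j
        phi[t] = scores.argmax(axis=0)
        delta[t] = scores.max(axis=0) * emit_p[:, obs[t]]
    states = np.zeros(T, dtype=int)
    states[-1] = delta[-1].argmax()
    for t in range(T - 2, -1, -1):             # backtrack through the pointers
        states[t] = phi[t + 1, states[t + 1]]
    return states, delta[-1].max()

start = np.array([0.5, 0.3, 0.2])
trans = np.array([[0.7, 0.2, 0.1], [0.2, 0.6, 0.2], [0.1, 0.4, 0.5]])
emit = np.array([[0.6, 0.2, 0.1, 0.05, 0.05, 0.0],
                 [0.05, 0.1, 0.2, 0.6, 0.05, 0.0],
                 [0.0, 0.0, 0.1, 0.1, 0.2, 0.6]])
print(viterbi([0, 3, 5, 2], start, trans, emit))
```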
thilbern/scikit-learn
examples/exercises/plot_cv_diabetes.py
15
2528
""" =============================================== Cross-validation on diabetes Dataset Exercise =============================================== A tutorial excercise which uses cross-validation with linear models. This exercise is used in the :ref:`cv_estimators_tut` part of the :ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`. """ from __future__ import print_function print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import cross_validation, datasets, linear_model diabetes = datasets.load_diabetes() X = diabetes.data[:150] y = diabetes.target[:150] lasso = linear_model.Lasso() alphas = np.logspace(-4, -.5, 30) scores = list() scores_std = list() for alpha in alphas: lasso.alpha = alpha this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1) scores.append(np.mean(this_scores)) scores_std.append(np.std(this_scores)) plt.figure(figsize=(4, 3)) plt.semilogx(alphas, scores) # plot error lines showing +/- std. errors of the scores plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)), 'b--') plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)), 'b--') plt.ylabel('CV score') plt.xlabel('alpha') plt.axhline(np.max(scores), linestyle='--', color='.5') ############################################################################## # Bonus: how much can you trust the selection of alpha? # To answer this question we use the LassoCV object that sets its alpha # parameter automatically from the data by internal cross-validation (i.e. it # performs cross-validation on the training data it receives). # We use external cross-validation to see how much the automatically obtained # alphas differ across different cross-validation folds. lasso_cv = linear_model.LassoCV(alphas=alphas) k_fold = cross_validation.KFold(len(X), 3) print("Answer to the bonus question:", "how much can you trust the selection of alpha?") print() print("Alpha parameters maximising the generalization score on different") print("subsets of the data:") for k, (train, test) in enumerate(k_fold): lasso_cv.fit(X[train], y[train]) print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}". format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test]))) print() print("Answer: Not very much since we obtained different alphas for different") print("subsets of the data and moreover, the scores for these alphas differ") print("quite substantially.") plt.show()
bsd-3-clause
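The example above relies on the pre-0.18 `sklearn.cross_validation` module, which has since been removed. With a recent scikit-learn the same scoring loop can be written against `sklearn.model_selection`; a sketch, assuming the same truncated diabetes data and keeping the original 3-fold split:

```python
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import KFold, cross_val_score

diabetes = datasets.load_diabetes()
X, y = diabetes.data[:150], diabetes.target[:150]

lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
# Mean CV score per alpha, mirroring the loop in the example above.
scores = [np.mean(cross_val_score(lasso.set_params(alpha=a), X, y, cv=3))
          for a in alphas]

# LassoCV still picks alpha internally; KFold(n_splits=3) replaces KFold(len(X), 3).
lasso_cv = linear_model.LassoCV(alphas=alphas)
for k, (train, test) in enumerate(KFold(n_splits=3).split(X)):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".format(
        k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
```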
FRESNA/PyPSA
examples/scigrid-de/scigrid-lopf-then-pf.py
1
12858
# -*- coding: utf-8 -*- ## LOPF then non-linear power flow with SciGRID # # This Jupyter Notebook is also available to download at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-lopf-then-pf.ipynb> and can be viewed as an HTML page at: <https://pypsa.readthedocs.io/en/latest/examples/scigrid-lopf-then-pf.html>. # # In this example, the dispatch of generators is optimised using the linear OPF, then a non-linear power flow is run on the resulting dispatch. # # The data files for this example are in the examples folder of the github repository: <https://github.com/PyPSA/PyPSA>. # ### Data sources # # Grid: based on [SciGRID](http://scigrid.de/) Version 0.2 which is based on [OpenStreetMap](http://www.openstreetmap.org/). # # Load size and location: based on Landkreise (NUTS 3) GDP and population. # # Load time series: from ENTSO-E hourly data, scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015)). # # Conventional power plant capacities and locations: BNetzA list. # # Wind and solar capacities and locations: EEG Stammdaten, based on http://www.energymap.info/download.html, which represents capacities at the end of 2014. Units without PLZ are removed. # # Wind and solar time series: REatlas, Andresen et al, "Validation of Danish wind time series from a new global renewable energy atlas for energy system analysis," Energy 93 (2015) 1074 - 1088. # # NB: # # All times in the dataset are UTC. # # Where SciGRID nodes have been split into 220kV and 380kV substations, all load and generation is attached to the 220kV substation. # ### Warnings # # This script and the data behind it are no longer supported. See https://github.com/PyPSA/pypsa-eur for a newer model that covers the whole of Europe. # # This dataset is ONLY intended to demonstrate the capabilities of PyPSA and is NOT (yet) accurate enough to be used for research purposes. # # Known problems include: # # i) Rough approximations have been made for missing grid data, e.g. 220kV-380kV transformers and connections between close sub-stations missing from OSM. # # ii) There appears to be some unexpected congestion in parts of the network, which may mean for example that the load attachment method (by Voronoi cell overlap with Landkreise) isn't working, particularly in regions with a high density of substations. # # iii) Attaching power plants to the nearest high voltage substation may not reflect reality. # # iv) There is no proper n-1 security in the calculations - this can either be simulated with a blanket e.g. 70% reduction in thermal limits (as done here) or a proper security constrained OPF (see e.g. <https://pypsa.readthedocs.io/en/latest/examples/scigrid-sclopf.ipynb>). # # v) The borders and neighbouring countries are not represented. # # vi) Hydroelectric power stations are not modelled accurately. # # viii) The marginal costs are illustrative, not accurate. # # ix) Only the first day of 2011 is in the github dataset, which is not representative. The full year of 2011 can be downloaded at <https://pypsa.readthedocs.io/en/latest/examples/scigrid-with-load-gen-trafos-2011.zip>. # # x) The ENTSO-E total load for Germany may not be scaled correctly; it is scaled up uniformly by factor 1.12 (a simplification of the methodology in Schumacher, Hirth (2015), which suggests monthly factors). # # xi) Biomass from the EEG Stammdaten are not read in at the moment. # # xii) Power plant start up costs, ramping limits/costs, minimum loading rates are not considered. 
import os import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pandas as pd import pypsa #%matplotlib inline # You may have to adjust this path to where # you downloaded the github repository # https://github.com/PyPSA/PyPSA csv_folder_name = ( os.path.dirname(pypsa.__file__) + "/../examples/scigrid-de/scigrid-with-load-gen-trafos/" ) network = pypsa.Network(csv_folder_name=csv_folder_name) ### Plot the distribution of the load and of generating tech fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()}) fig.set_size_inches(6, 6) load_distribution = ( network.loads_t.p_set.loc[network.snapshots[0]].groupby(network.loads.bus).sum() ) network.plot(bus_sizes=0.5 * load_distribution, ax=ax, title="Load distribution") fig.tight_layout() # fig.savefig('load-distribution.png') network.generators.groupby("carrier")["p_nom"].sum() network.storage_units.groupby("carrier")["p_nom"].sum() techs = ["Gas", "Brown Coal", "Hard Coal", "Wind Offshore", "Wind Onshore", "Solar"] n_graphs = len(techs) n_cols = 3 if n_graphs % n_cols == 0: n_rows = n_graphs // n_cols else: n_rows = n_graphs // n_cols + 1 fig, axes = plt.subplots( nrows=n_rows, ncols=n_cols, subplot_kw={"projection": ccrs.PlateCarree()} ) size = 4 fig.set_size_inches(size * n_cols, size * n_rows) for i, tech in enumerate(techs): i_row = i // n_cols i_col = i % n_cols ax = axes[i_row, i_col] gens = network.generators[network.generators.carrier == tech] gen_distribution = ( gens.groupby("bus").sum()["p_nom"].reindex(network.buses.index, fill_value=0.0) ) network.plot(ax=ax, bus_sizes=0.2 * gen_distribution) ax.set_title(tech) ### Run Linear Optimal Power Flow on the first day of 2011 # to approximate n-1 security and allow room for reactive power flows, # don't allow any line to be loaded above 70% of their thermal rating contingency_factor = 0.7 network.lines.s_max_pu = contingency_factor # There are some infeasibilities without small extensions network.lines.loc[["316", "527", "602"], "s_nom"] = 1715 # the lines to extend to resolve infeasibilities can # be found by # uncommenting the lines below to allow the network to be extended # network.lines["s_nom_original"] = network.lines.s_nom # network.lines.s_nom_extendable = True # network.lines.s_nom_min = network.lines.s_nom # Assume 450 EUR/MVA/km # network.lines.capital_cost = 450*network.lines.length group_size = 4 solver_name = "cbc" print("Performing linear OPF for one day, {} snapshots at a time:".format(group_size)) network.storage_units.state_of_charge_initial = 0.0 for i in range(int(24 / group_size)): # set the initial state of charge based on previous round if i > 0: network.storage_units.state_of_charge_initial = ( network.storage_units_t.state_of_charge.loc[ network.snapshots[group_size * i - 1] ] ) network.lopf( network.snapshots[group_size * i : group_size * i + group_size], solver_name=solver_name, keep_files=True, ) # network.lines.s_nom = network.lines.s_nom_opt # if lines are extended, look at which ones are bigger # network.lines[["s_nom_original","s_nom"]][abs(network.lines.s_nom - network.lines.s_nom_original) > 1] p_by_carrier = network.generators_t.p.groupby(network.generators.carrier, axis=1).sum() p_by_carrier.drop( (p_by_carrier.max()[p_by_carrier.max() < 1700.0]).index, axis=1, inplace=True ) p_by_carrier.columns colors = { "Brown Coal": "brown", "Hard Coal": "k", "Nuclear": "r", "Run of River": "green", "Wind Onshore": "blue", "Solar": "yellow", "Wind Offshore": "cyan", "Waste": "orange", "Gas": "orange", } # 
reorder cols = [ "Nuclear", "Run of River", "Brown Coal", "Hard Coal", "Gas", "Wind Offshore", "Wind Onshore", "Solar", ] p_by_carrier = p_by_carrier[cols] fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) (p_by_carrier / 1e3).plot( kind="area", ax=ax, linewidth=4, colors=[colors[col] for col in p_by_carrier.columns], ) ax.legend(ncol=4, loc="upper left") ax.set_ylabel("GW") ax.set_xlabel("") fig.tight_layout() # fig.savefig("stacked-gen.png") fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) p_storage = network.storage_units_t.p.sum(axis=1) state_of_charge = network.storage_units_t.state_of_charge.sum(axis=1) p_storage.plot(label="Pumped hydro dispatch", ax=ax, linewidth=3) state_of_charge.plot(label="State of charge", ax=ax, linewidth=3) ax.legend() ax.grid() ax.set_ylabel("MWh") ax.set_xlabel("") fig.tight_layout() # fig.savefig("storage-scigrid.png") now = network.snapshots[4] print("With the linear load flow, there is the following per unit loading:") loading = network.lines_t.p0.loc[now] / network.lines.s_nom print(loading.describe()) fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()}) fig.set_size_inches(6, 6) network.plot( ax=ax, line_colors=abs(loading), line_cmap=plt.cm.jet, title="Line loading" ) fig.tight_layout() # fig.savefig("line-loading.png") network.buses_t.marginal_price.loc[now].describe() fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()}) fig.set_size_inches(6, 4) network.plot(ax=ax, line_widths=pd.Series(0.5, network.lines.index)) plt.hexbin( network.buses.x, network.buses.y, gridsize=20, C=network.buses_t.marginal_price.loc[now], cmap=plt.cm.jet, ) # for some reason the colorbar only works with graphs plt.plot # and must be attached plt.colorbar cb = plt.colorbar() cb.set_label("Locational Marginal Price (EUR/MWh)") fig.tight_layout() # fig.savefig('lmp.png') ### Look at variable curtailment carrier = "Wind Onshore" capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"] p_available = network.generators_t.p_max_pu.multiply(network.generators["p_nom"]) p_available_by_carrier = p_available.groupby(network.generators.carrier, axis=1).sum() p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier p_df = pd.DataFrame( { carrier + " available": p_available_by_carrier[carrier], carrier + " dispatched": p_by_carrier[carrier], carrier + " curtailed": p_curtailed_by_carrier[carrier], } ) p_df[carrier + " capacity"] = capacity p_df["Wind Onshore curtailed"][p_df["Wind Onshore curtailed"] < 0.0] = 0.0 fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) p_df[[carrier + " dispatched", carrier + " curtailed"]].plot( kind="area", ax=ax, linewidth=3 ) p_df[[carrier + " available", carrier + " capacity"]].plot(ax=ax, linewidth=3) ax.set_xlabel("") ax.set_ylabel("Power [MW]") ax.set_ylim([0, 40000]) ax.legend() fig.tight_layout() # fig.savefig("scigrid-curtailment.png") ## Check power flow now = network.snapshots[0] for bus in network.buses.index: bus_sum = network.buses_t.p.loc[now, bus] branches_sum = 0 for comp in ["lines", "transformers"]: comps = getattr(network, comp) comps_t = getattr(network, comp + "_t") branches_sum += ( comps_t.p0.loc[now, comps.bus0 == bus].sum() - comps_t.p0.loc[now, comps.bus1 == bus].sum() ) if abs(bus_sum - branches_sum) > 1e-4: print(bus, bus_sum, branches_sum) ### Now perform a full Newton-Raphson power flow on the first hour # For the PF, set the P to the optimised P network.generators_t.p_set = network.generators_t.p_set.reindex( 
columns=network.generators.index ) network.generators_t.p_set = network.generators_t.p network.storage_units_t.p_set = network.storage_units_t.p_set.reindex( columns=network.storage_units.index ) network.storage_units_t.p_set = network.storage_units_t.p # set all buses to PV, since we don't know what Q set points are network.generators.control = "PV" # set slack # network.generators.loc["1 Coal","control"] = "Slack" # Need some PQ buses so that Jacobian doesn't break f = network.generators[network.generators.bus == "492"] network.generators.loc[f.index, "control"] = "PQ" print("Performing non-linear PF on results of LOPF:") info = network.pf() # any failed to converge? (~info.converged).any().any() print( "With the non-linear load flow, there is the following per unit loading\nof the full thermal rating:" ) print((network.lines_t.p0.loc[now] / network.lines.s_nom).describe()) # Get voltage angle differences df = network.lines.copy() for b in ["bus0", "bus1"]: df = pd.merge( df, network.buses_t.v_ang.loc[[now]].T, how="left", left_on=b, right_index=True ) s = df[str(now) + "_x"] - df[str(now) + "_y"] print("The voltage angle differences across the lines have (in degrees):") print((s * 180 / np.pi).describe()) # plot the reactive power fig, ax = plt.subplots(1, 1, subplot_kw={"projection": ccrs.PlateCarree()}) fig.set_size_inches(6, 6) q = network.buses_t.q.loc[now] bus_colors = pd.Series("r", network.buses.index) bus_colors[q < 0.0] = "b" network.plot( bus_sizes=abs(q), ax=ax, bus_colors=bus_colors, title="Reactive power feed-in (red=+ve, blue=-ve)", ) fig.tight_layout() # fig.savefig("reactive-power.png") network.generators_t.q.loc[now].sum() network.buses_t.q.loc[now].sum()
gpl-3.0
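Because the content field of the record above is stored with most of its line breaks collapsed, a short readable sketch of the workflow it implements (linear OPF, then a non-linear power flow on the optimised dispatch) may help. The sketch below only reuses calls that appear in the record itself; the CSV folder path is a placeholder, and an installed CBC solver is assumed.

import pypsa

# Placeholder path to the SciGRID-DE CSV folder shipped with the PyPSA examples.
csv_folder = "scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder)

# Approximate n-1 security by capping line loading at 70% of the thermal rating.
network.lines.s_max_pu = 0.7

# Linear optimal power flow over all snapshots (the record solves in chunks of 4).
network.lopf(network.snapshots, solver_name="cbc")

# Use the optimised dispatch as set points, then run the non-linear power flow.
network.generators_t.p_set = network.generators_t.p
network.storage_units_t.p_set = network.storage_units_t.p
network.generators.control = "PV"   # Q set points are unknown, so treat buses as PV
info = network.pf()
print((~info.converged).any().any())  # True if any snapshot failed to converge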
PatrickChrist/scikit-learn
examples/decomposition/plot_incremental_pca.py
243
1878
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ print(__doc__) # Authors: Kyle Kastner # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names): plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1], c=c, label=target_name) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error " "%.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best") plt.axis([-4, 4, -1.5, 1.5]) plt.show()
bsd-3-clause
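The example above notes that IPCA is intended for datasets that do not fit in memory, but it still calls fit_transform on the whole array. A minimal out-of-core sketch using partial_fit on batches, which is closer to that intended use, is shown below (the batch splitting here is purely illustrative).

import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import IncrementalPCA

X = load_iris().data

# Feed the data to IncrementalPCA in small chunks, as one would when
# streaming batches from disk instead of holding X in memory.
ipca = IncrementalPCA(n_components=2)
for batch in np.array_split(X, 10):
    ipca.partial_fit(batch)

X_ipca = ipca.transform(X)
print(X_ipca.shape)                    # (150, 2)
print(ipca.explained_variance_ratio_)  # variance captured by each component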
open-mmlab/mmdetection
configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
1
4127
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ], ))
# use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=False,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
apache-2.0
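The record above is an mmdetection config that inherits from a Cascade R-CNN base via _base_. A hedged sketch of how such a file is typically loaded and inspected is given below; it assumes an mmdetection checkout and an mmcv version contemporary with this config (in newer stacks the Config class lives in mmengine instead), and the config path is simply the one from the record.

from mmcv import Config

# Path inside an mmdetection checkout, taken from the record above.
cfg_path = ('configs/resnest/'
            'cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py')
cfg = Config.fromfile(cfg_path)

# _base_ inheritance is resolved at load time, so the merged settings
# can be inspected directly.
print(cfg.model.backbone.type)  # 'ResNeSt'
print(cfg.pretty_text)          # dump the fully merged config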
imaculate/scikit-learn
sklearn/ensemble/tests/test_weight_boosting.py
56
17158
"""Testing for the boost module (sklearn.ensemble.boost).""" import numpy as np from sklearn.utils.testing import assert_array_equal, assert_array_less from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal, assert_true from sklearn.utils.testing import assert_raises, assert_raises_regexp from sklearn.base import BaseEstimator from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import AdaBoostRegressor from sklearn.ensemble import weight_boosting from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.svm import SVC, SVR from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.utils import shuffle from sklearn import datasets # Common random state rng = np.random.RandomState(0) # Toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels y_regr = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] y_t_class = ["foo", 1, 1] y_t_regr = [-1, 1, 1] # Load the iris dataset and randomly permute it iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) # Load the boston dataset and randomly permute it boston = datasets.load_boston() boston.data, boston.target = shuffle(boston.data, boston.target, random_state=rng) def test_samme_proba(): # Test the `_samme_proba` helper function. # Define some example (bad) `predict_proba` output. probs = np.array([[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]) probs /= np.abs(probs.sum(axis=1))[:, np.newaxis] # _samme_proba calls estimator.predict_proba. # Make a mock object so I can control what gets returned. class MockEstimator(object): def predict_proba(self, X): assert_array_equal(X.shape, probs.shape) return probs mock = MockEstimator() samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs)) assert_array_equal(samme_proba.shape, probs.shape) assert_true(np.isfinite(samme_proba).all()) # Make sure that the correct elements come out as smallest -- # `_samme_proba` should preserve the ordering in each example. assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2]) assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1]) def test_classification_toy(): # Check classification on a toy dataset. for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, random_state=0) clf.fit(X, y_class) assert_array_equal(clf.predict(T), y_t_class) assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_) assert_equal(clf.predict_proba(T).shape, (len(T), 2)) assert_equal(clf.decision_function(T).shape, (len(T),)) def test_regression_toy(): # Check classification on a toy dataset. clf = AdaBoostRegressor(random_state=0) clf.fit(X, y_regr) assert_array_equal(clf.predict(T), y_t_regr) def test_iris(): # Check consistency on dataset iris. 
classes = np.unique(iris.target) clf_samme = prob_samme = None for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(iris.data, iris.target) assert_array_equal(classes, clf.classes_) proba = clf.predict_proba(iris.data) if alg == "SAMME": clf_samme = clf prob_samme = proba assert_equal(proba.shape[1], len(classes)) assert_equal(clf.decision_function(iris.data).shape[1], len(classes)) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with algorithm %s and score = %f" % \ (alg, score) # Somewhat hacky regression test: prior to # ae7adc880d624615a34bafdb1d75ef67051b8200, # predict_proba returned SAMME.R values for SAMME. clf_samme.algorithm = "SAMME.R" assert_array_less(0, np.abs(clf_samme.predict_proba(iris.data) - prob_samme)) def test_boston(): # Check consistency on dataset boston house prices. clf = AdaBoostRegressor(random_state=0) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score > 0.85 def test_staged_predict(): # Check staged predictions. rng = np.random.RandomState(0) iris_weights = rng.randint(10, size=iris.target.shape) boston_weights = rng.randint(10, size=boston.target.shape) # AdaBoost classification for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg, n_estimators=10) clf.fit(iris.data, iris.target, sample_weight=iris_weights) predictions = clf.predict(iris.data) staged_predictions = [p for p in clf.staged_predict(iris.data)] proba = clf.predict_proba(iris.data) staged_probas = [p for p in clf.staged_predict_proba(iris.data)] score = clf.score(iris.data, iris.target, sample_weight=iris_weights) staged_scores = [ s for s in clf.staged_score( iris.data, iris.target, sample_weight=iris_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_probas), 10) assert_array_almost_equal(proba, staged_probas[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) # AdaBoost regression clf = AdaBoostRegressor(n_estimators=10, random_state=0) clf.fit(boston.data, boston.target, sample_weight=boston_weights) predictions = clf.predict(boston.data) staged_predictions = [p for p in clf.staged_predict(boston.data)] score = clf.score(boston.data, boston.target, sample_weight=boston_weights) staged_scores = [ s for s in clf.staged_score( boston.data, boston.target, sample_weight=boston_weights)] assert_equal(len(staged_predictions), 10) assert_array_almost_equal(predictions, staged_predictions[-1]) assert_equal(len(staged_scores), 10) assert_array_almost_equal(score, staged_scores[-1]) def test_gridsearch(): # Check that base trees can be grid-searched. # AdaBoost classification boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier()) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2), 'algorithm': ('SAMME', 'SAMME.R')} clf = GridSearchCV(boost, parameters) clf.fit(iris.data, iris.target) # AdaBoost regression boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(), random_state=0) parameters = {'n_estimators': (1, 2), 'base_estimator__max_depth': (1, 2)} clf = GridSearchCV(boost, parameters) clf.fit(boston.data, boston.target) def test_pickle(): # Check pickability. 
import pickle # Adaboost classifier for alg in ['SAMME', 'SAMME.R']: obj = AdaBoostClassifier(algorithm=alg) obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_equal(score, score2) # Adaboost regressor obj = AdaBoostRegressor(random_state=0) obj.fit(boston.data, boston.target) score = obj.score(boston.data, boston.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(boston.data, boston.target) assert_equal(score, score2) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=1) for alg in ['SAMME', 'SAMME.R']: clf = AdaBoostClassifier(algorithm=alg) clf.fit(X, y) importances = clf.feature_importances_ assert_equal(importances.shape[0], 10) assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(), True) def test_error(): # Test that it gives proper exception on deficient input. assert_raises(ValueError, AdaBoostClassifier(learning_rate=-1).fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier(algorithm="foo").fit, X, y_class) assert_raises(ValueError, AdaBoostClassifier().fit, X, y_class, sample_weight=np.asarray([-1])) def test_base_estimator(): # Test different base estimators. from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC # XXX doesn't work with y_class because RF doesn't support classes_ # Shouldn't AdaBoost run a LabelBinarizer? clf = AdaBoostClassifier(RandomForestClassifier()) clf.fit(X, y_regr) clf = AdaBoostClassifier(SVC(), algorithm="SAMME") clf.fit(X, y_class) from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) clf.fit(X, y_regr) clf = AdaBoostRegressor(SVR(), random_state=0) clf.fit(X, y_regr) # Check that an empty discrete ensemble fails in fit, not predict. X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] y_fail = ["foo", "bar", 1, 2] clf = AdaBoostClassifier(SVC(), algorithm="SAMME") assert_raises_regexp(ValueError, "worse than random", clf.fit, X_fail, y_fail) def test_sample_weight_missing(): from sklearn.linear_model import LogisticRegression from sklearn.cluster import KMeans clf = AdaBoostClassifier(KMeans(), algorithm="SAMME") assert_raises(ValueError, clf.fit, X, y_regr) clf = AdaBoostRegressor(KMeans()) assert_raises(ValueError, clf.fit, X, y_regr) def test_sparse_classification(): # Check classification with sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set.""" def fit(self, X, y, sample_weight=None): """Modification on fit caries data type for later verification.""" super(CustomSVC, self).fit(X, y, sample_weight=sample_weight) self.data_type_ = type(X) return self X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15, n_features=5, random_state=42) # Flatten y to a 1d array y = np.ravel(y) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) # Trained on sparse format sparse_classifier = AdaBoostClassifier( base_estimator=CustomSVC(probability=True), random_state=1, algorithm="SAMME" ).fit(X_train_sparse, y_train) # Trained on dense format dense_classifier = AdaBoostClassifier( base_estimator=CustomSVC(probability=True), random_state=1, algorithm="SAMME" ).fit(X_train, y_train) # predict sparse_results = sparse_classifier.predict(X_test_sparse) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) # decision_function sparse_results = sparse_classifier.decision_function(X_test_sparse) dense_results = dense_classifier.decision_function(X_test) assert_array_equal(sparse_results, dense_results) # predict_log_proba sparse_results = sparse_classifier.predict_log_proba(X_test_sparse) dense_results = dense_classifier.predict_log_proba(X_test) assert_array_equal(sparse_results, dense_results) # predict_proba sparse_results = sparse_classifier.predict_proba(X_test_sparse) dense_results = dense_classifier.predict_proba(X_test) assert_array_equal(sparse_results, dense_results) # score sparse_results = sparse_classifier.score(X_test_sparse, y_test) dense_results = dense_classifier.score(X_test, y_test) assert_array_equal(sparse_results, dense_results) # staged_decision_function sparse_results = sparse_classifier.staged_decision_function( X_test_sparse) dense_results = dense_classifier.staged_decision_function(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_predict sparse_results = sparse_classifier.staged_predict(X_test_sparse) dense_results = dense_classifier.staged_predict(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_predict_proba sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse) dense_results = dense_classifier.staged_predict_proba(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # staged_score sparse_results = sparse_classifier.staged_score(X_test_sparse, y_test) dense_results = dense_classifier.staged_score(X_test, y_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) # Verify sparsity of data is maintained during training types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([(t == csc_matrix or t == csr_matrix) for t in types]) def test_sparse_regression(): # Check regression with sparse input. 
class CustomSVR(SVR): """SVR variant that records the nature of the training set.""" def fit(self, X, y, sample_weight=None): """Modification on fit caries data type for later verification.""" super(CustomSVR, self).fit(X, y, sample_weight=sample_weight) self.data_type_ = type(X) return self X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix, dok_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) # Trained on sparse format sparse_classifier = AdaBoostRegressor( base_estimator=CustomSVR(), random_state=1 ).fit(X_train_sparse, y_train) # Trained on dense format dense_classifier = dense_results = AdaBoostRegressor( base_estimator=CustomSVR(), random_state=1 ).fit(X_train, y_train) # predict sparse_results = sparse_classifier.predict(X_test_sparse) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) # staged_predict sparse_results = sparse_classifier.staged_predict(X_test_sparse) dense_results = dense_classifier.staged_predict(X_test) for sprase_res, dense_res in zip(sparse_results, dense_results): assert_array_equal(sprase_res, dense_res) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([(t == csc_matrix or t == csr_matrix) for t in types]) def test_sample_weight_adaboost_regressor(): """ AdaBoostRegressor should work without sample_weights in the base estimator The random weighted sampling is done internally in the _boost method in AdaBoostRegressor. """ class DummyEstimator(BaseEstimator): def fit(self, X, y): pass def predict(self, X): return np.zeros(X.shape[0]) boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3) boost.fit(X, y_regr) assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
bsd-3-clause
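For readers who only want the behaviour these tests exercise, a minimal AdaBoost usage sketch mirroring test_iris above is shown below. It matches the scikit-learn era of this test file; newer releases deprecate the SAMME.R algorithm, so the loop may need adjusting there.

from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier

iris = load_iris()

for alg in ["SAMME", "SAMME.R"]:
    clf = AdaBoostClassifier(algorithm=alg, n_estimators=50, random_state=0)
    clf.fit(iris.data, iris.target)
    # test_iris asserts a training score above 0.9 for both variants.
    print(alg, "score:", clf.score(iris.data, iris.target))
    # Staged predictions expose the ensemble after each boosting round.
    staged = list(clf.staged_predict(iris.data))
    print(alg, "boosting rounds:", len(staged))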
kcompher/thunder
thunder/utils/datasets.py
8
6850
""" Utilities for generating example datasets """ from numpy import array, asarray, random, shape, floor, dot, linspace, \ sin, sign, c_, ceil, inf, clip, zeros, max, size, sqrt, log, matrix from thunder.rdds.matrices import RowMatrix from thunder.rdds.series import Series class DataSets(object): def __init__(self, sc, returnParams=False): self.sc = sc self.returnParams = returnParams @staticmethod def make(sc, name, returnParams=False, **opts): try: return DATASET_MAKERS[name.lower()](sc, returnParams).generate(**opts) except KeyError: raise NotImplementedError("no dataset generator for '%s'" % name) @staticmethod def appendKeys(data): data = array(data) n = shape(data)[0] x = (random.rand(n) * n).astype(int) return zip(x, data) class KMeansData(DataSets): def generate(self, k=5, npartitions=10, ndims=5, nrecords=100, noise=0.1, seed=None): random.seed(seed) centers = random.randn(k, ndims) genFunc = lambda i: centers[int(floor(random.rand(1, 1) * k))] + noise*random.rand(ndims) dataLocal = map(genFunc, range(0, nrecords)) data = Series(self.sc.parallelize(self.appendKeys(dataLocal), npartitions)) if self.returnParams is True: return data, centers else: return data class PCAData(DataSets): def generate(self, k=3, npartitions=10, nrows=100, ncols=10, seed=None): random.seed(seed) u = random.randn(nrows, k) v = random.randn(k, ncols) a = dot(u, v) a += random.randn(shape(a)[0], shape(a)[1]) data = RowMatrix(self.sc.parallelize(self.appendKeys(a), npartitions)) if self.returnParams is True: return data, u, v else: return data class FactorAnalysisData(DataSets): def generate(self, q=1, p=3, nrows=50, npartitions=10, sigmas=None, seed=None): """ Generate data from a factor analysis model Parameters ---------- q : int, optional, default = 1 The number of factors generating this data p : int, optios, default = 3 The number of observed factors (p >= q) nrows : int, optional, default = 50 Number of observations we have sigmas = 1 x p ndarray, optional, default = None Scale of the noise to add, randomly generated from standard normal distribution if not given """ random.seed(seed) # Generate factor loadings (n x q) F = matrix(random.randn(nrows, q)) # Generate factor scores (q x p) w = matrix(random.randn(q, p)) # Generate non-zero the error covariances (1 x p) if sigmas is None: sigmas = random.randn(1, p) # Generate the error terms (n x p) # (each row gets scaled by our sigmas) epsilon = random.randn(nrows, p) * sigmas # Combine this to get our actual data (n x p) x = (F * w) + epsilon # Put the data in an RDD data = RowMatrix(self.sc.parallelize(self.appendKeys(x), npartitions)) if self.returnParams is True: return data, F, w, epsilon else: return data class RandomData(DataSets): def generate(self, nrows=50, ncols=50, npartitions=10, seed=None): """ Generate a matrix where every element is i.i.d. 
and drawn from a standard normal distribution Parameters ---------- nrows : int, optional, default = 50 Number of columns in the generated matrix nrows : int, optional, default = 50 Number of rows in the generated matrix """ rdd = self.sc.parallelize(self.appendKeys(xrange(nrows)), npartitions) if seed is not None: seed = hash(seed) def f((k, v)): random.seed(seed + v) return k, random.randn(ncols) else: def f((k, v)): return k, random.randn(ncols) rdd = rdd.map(f) return RowMatrix(rdd) class ICAData(DataSets): def generate(self, npartitions=10, nrows=100): random.seed(42) time = linspace(0, 10, nrows) s1 = sin(2 * time) s2 = sign(sin(3 * time)) s = c_[s1, s2] s += 0.2 * random.randn(s.shape[0], s.shape[1]) # Add noise s /= s.std(axis=0) a = array([[1, 1], [0.5, 2]]) x = dot(s, a.T) data = RowMatrix(self.sc.parallelize(self.appendKeys(x), npartitions)) if self.returnParams is True: return data, s, a else: return data class SourcesData(DataSets): def generate(self, dims=(100, 200), centers=5, t=100, margin=35, sd=3, noise=0.1, npartitions=1, seed=None): from scipy.ndimage.filters import gaussian_filter, gaussian_filter1d from skimage.draw import circle from thunder.rdds.fileio.imagesloader import ImagesLoader from thunder.extraction.source import SourceModel random.seed(seed) if len(dims) != 2: raise Exception("Can only generate for two-dimensional sources.") if size(centers) == 1: n = centers xcenters = (dims[0] - margin) * random.random_sample(n) + margin/2 ycenters = (dims[1] - margin) * random.random_sample(n) + margin/2 centers = zip(xcenters, ycenters) else: centers = asarray(centers) n = len(centers) ts = [random.randn(t) for i in range(0, n)] ts = clip(asarray([gaussian_filter1d(vec, 5) for vec in ts]), 0, 1) for ii, tt in enumerate(ts): ts[ii] = (tt / tt.max()) * 2 allframes = [] for tt in range(0, t): frame = zeros(dims) for nn in range(0, n): base = zeros(dims) base[centers[nn][0], centers[nn][1]] = 1 img = gaussian_filter(base, sd) img = img/max(img) frame += img * ts[nn][tt] frame += clip(random.randn(dims[0], dims[1]) * noise, 0, inf) allframes.append(frame) def pointToCircle(center, radius): rr, cc = circle(center[0], center[1], radius) return array(zip(rr, cc)) r = round(sd * 1.5) sources = SourceModel([pointToCircle(c, r) for c in centers]) data = ImagesLoader(self.sc).fromArrays(allframes, npartitions).astype('float') if self.returnParams is True: return data, ts, sources else: return data DATASET_MAKERS = { 'kmeans': KMeansData, 'pca': PCAData, 'factor': FactorAnalysisData, 'rand': RandomData, 'ica': ICAData, 'sources': SourcesData }
apache-2.0
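A short usage sketch of the generators in the record above, assuming a running Spark context and the Python 2-era thunder package; the import path is inferred from the file location and the keyword arguments are the defaults documented in KMeansData.generate.

from pyspark import SparkContext
from thunder.utils.datasets import DataSets  # path inferred from the record

sc = SparkContext(appName="thunder-datasets-demo")

# Generate a synthetic k-means Series together with the true cluster centers.
data, centers = DataSets.make(sc, "kmeans", returnParams=True,
                              k=5, ndims=5, nrecords=100, noise=0.1, seed=42)

# `data` is a thunder Series backed by a Spark RDD of (key, vector) records;
# `centers` is a k x ndims NumPy array of the generating cluster centers.
print(centers.shape)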
sambitgaan/nupic
src/nupic/datafiles/extra/regression/makeDataset.py
34
4873
#! /usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Generate artificial datasets """ import numpy from nupic.data.file import File def scaleData(data, newScale=[0,100]): minVals = data.min(axis=0) maxVals = data.max(axis=0) data = (data-minVals)*(newScale[1]-newScale[0])/(maxVals-minVals) + newScale[0] return data def generatePolyData(numDataPoints=100, coefficients=[1, 0], noiseLevel = 0.1, dataScale = [0,100],): xvals = numpy.random.random(numDataPoints) yvals = numpy.polyval(coefficients, xvals) + \ noiseLevel * numpy.random.randn(numDataPoints) data = numpy.vstack((yvals, xvals)).transpose() scaledData = scaleData(data, newScale=dataScale) return scaledData def generateLinearData(numDataPoints=100, coefficients=[1, 1], noiseLevel = 0.1, dataScale = [0,100],): xvals = numpy.random.random((numDataPoints, len(coefficients))) yvals = (xvals * coefficients).sum(axis=1) + \ noiseLevel * numpy.random.randn(numDataPoints) data = numpy.hstack((yvals.reshape(-1,1), xvals)) scaledData = scaleData(data, newScale=dataScale) return scaledData def _generateLinearModel(numTrainingRecords, numTestingRecords, coefficients=[1], noiseLevel=0.1, dataScale=[0,100]): """ """ data = generateLinearData(numDataPoints=numTrainingRecords+numTestingRecords, coefficients=coefficients, noiseLevel=noiseLevel, dataScale=dataScale,) trainData = data[:numTrainingRecords] testData = data[numTrainingRecords:] return trainData, testData def _generateFile(filename, data): """ Parameters: ---------------------------------------------------------------- filename: name of .csv file to generate """ # Create the file print "Creating %s..." 
% (filename) numRecords, numFields = data.shape fields = [('field%d'%(i+1), 'float', '') for i in range(numFields)] outFile = File(filename, fields) for i in xrange(numRecords): outFile.write(data[i].tolist()) outFile.close() def generate(model, filenameTrain, filenameTest, numTrainingRecords=10000, numTestingRecords=1000,): """ """ numpy.random.seed(41) # ==================================================================== # Generate the model if model == 'linear0': trainData, testData = _generateLinearModel(numTrainingRecords, numTestingRecords, coefficients=[1], noiseLevel=0.1) #import pylab #pylab.figure() #pylab.plot(trainData[:,1], trainData[:,0], 'b.') ##pylab.figure() #pylab.plot(testData[:,1], testData[:,0],'g.') #pylab.show() elif model == 'linear1': trainData, testData = _generateLinearModel(numTrainingRecords, numTestingRecords, coefficients=[1,1], noiseLevel=0.1) elif model == 'linear2': trainData, testData = _generateLinearModel(numTrainingRecords, numTestingRecords, coefficients=[1,-3]) else: raise RuntimeError("Unsupported model") # ==================================================================== # Generate the training and testing files _generateFile(filename=filenameTrain, data=trainData,) _generateFile(filename=filenameTest, data=testData,)
agpl-3.0
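The module above is a small dataset-writing helper; assuming the package layout implied by its path (the import below is inferred and may differ), a Python 2-style usage sketch is:

# Python 2, matching the module above; requires the nupic package.
from nupic.datafiles.extra.regression.makeDataset import generate

# Writes a noisy one-variable linear dataset, scaled to [0, 100],
# into two CSV files using nupic's File writer.
generate(model="linear0",
         filenameTrain="linear0_train.csv",
         filenameTest="linear0_test.csv",
         numTrainingRecords=1000,
         numTestingRecords=100)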
aelaguiz/pyvotune
pyvotune/feature_extractors/theano_rbm_extractor.py
1
7540
# -*- coding: utf-8 -*- from collections import Mapping, Sequence from operator import itemgetter import math import time import numpy as np import scipy.sparse as sp import random from sklearn.preprocessing import normalize from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils import atleast2d_or_csr from pyvotune.theano import dataset, RBM from pyvotune.log import logger log = logger() global_theano = None global_T = None global_RandomStreams = None class TheanoRBMFeatureExtractor(BaseEstimator, TransformerMixin): def __init__( self, learning_rate=0.1, training_epochs=15, batch_size=20, n_resamples=10, n_hidden=500): self.learning_rate = learning_rate self.training_epochs = training_epochs self.batch_size = batch_size self.n_hidden = n_hidden self.n_resamples = n_resamples super(TheanoRBMFeatureExtractor, self).__init__() def fit(self, X, y=None): global global_theano global global_T global global_RandomStreams log.debug(u"RBM Fitting with lr={0} epochs={1} n_hidden={2}".format( self.learning_rate, self.training_epochs, self.n_hidden)) ## This prevents us from multiple importing theano which is important ## since it performs some global initialization, especially for cuda if not global_theano: log.debug(u"Importing Theano") import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams theano.config.warn.subtensor_merge_bug = False global_theano = theano global_T = T global_RandomStreams = RandomStreams self.rng = np.random.RandomState(123456) self.theano_rng = global_RandomStreams(self.rng.randint(2 ** 30)) self.n_visible = np.shape(X)[1] #log.debug(u"RBM Featureset has {0} visible nodes".format( #self.n_visible)) train_x, train_y = dataset.shared_dataset(global_theano, global_T, X, y, borrow=True) self.init_objects(train_x) self.train(train_x) return self def train(self, train_x): n_train_batches = train_x.get_value(borrow=True).shape[0] / self.batch_size log.debug( u"Fitting RBM With {0} training batches".format(n_train_batches)) for epoch in xrange(self.training_epochs): # go through the training set mean_cost = [] t_start = time.time() log.debug(u"RBM Training epoch {0}".format(epoch)) for batch_index in xrange(n_train_batches): t_batch_start = time.time() mean_cost += [self.train_rbm(batch_index)] t_batch_end = time.time() log.debug(u"Training batch {0} of {1} - took {2}s".format( batch_index, n_train_batches, t_batch_end - t_batch_start)) t_end = time.time() log.debug(u'Training epoch {0}, cost is {1} - took {2}s'.format( epoch, np.mean(mean_cost), t_end - t_start)) def transform(self, X, y=None): test_set_x, _ = dataset.shared_dataset(global_theano, global_T, X, borrow=True) # pick random test examples, with which to initialize the persistent chain persistent_vis_chain = global_theano.shared(np.asarray(test_set_x.get_value(borrow=True), dtype=global_theano.config.floatX)) [presig_hids, hid_mfs, hid_samples, presig_vis, vis_mfs, vis_samples], updates = \ global_theano.scan( self.rbm.gibbs_vhv, outputs_info=[None, None, None, None, None, persistent_vis_chain], n_steps=1) # add to updates the shared variable that takes care of our persistent # chain :. #updates.update({persistent_vis_chain: vis_samples[-1]}) # construct the function that implements our persistent chain. 
# we generate the "mean field" activations for plotting and the actual # samples for reinitializing the state of our persistent chain sample_fn = global_theano.function( [], [hid_mfs[-1], hid_samples[-1], vis_mfs[-1], vis_samples[-1]], name='sample_fn') ident = random.randint(0, 500) all_hid_mfs = [] all_vis_sample = [] all_hid_sample = [] for i in range(self.n_resamples): hid_mfs, hid_sample, vis_mfs, vis_sample = sample_fn() all_hid_mfs.append(hid_mfs) all_hid_sample.append(hid_sample) all_vis_sample.append(vis_sample) hidden_mean_field = np.mean(all_hid_mfs, axis=0) visible_mean_field = np.mean(all_vis_sample, axis=0) print "all_hid_mfs shape", np.shape(all_hid_mfs) print "Hidden mean field", np.shape(hidden_mean_field) print "Shapes", np.shape(hidden_mean_field), np.shape(all_hid_mfs) #self.sample_all(X, all_hid_sample, all_vis_sample, ident) #return hidden_mean_field return visible_mean_field #def sample_all(self, X, all_hid_sample, all_vis_sample, ident): #width = np.shape(X)[1] #sq = math.sqrt(width) #if width != sq ** 2: #return #hid_sample_mean_field = np.mean(all_hid_sample, axis=0) #vis_sample_mean_field = np.mean(all_vis_sample, axis=0) #all_recons = [] #n_padding = (width - self.n_hidden) / 2 #padding = np.zeros((n_padding, )) #for sample, recons, hidden in zip(X, vis_sample_mean_field, hid_sample_mean_field)[:10]: #padded_hidden = np.hstack((padding, hidden, padding)) #comb = np.hstack(( #sample.reshape(50, 50), recons.reshape(50, 50), #padded_hidden.reshape(50, 50))) #comb = np.flipud(comb) #all_recons.append(comb) #np_to_pil( #np.vstack(all_recons), colorize=True, #filename='samples/%i_samp_reconstruction_%i_%ires.png' % ( #ident, len(X), self.n_resamples)) def init_objects(self, train_x): # allocate symbolic variables for the data self.index = global_T.lscalar() # index to a [mini]batch self.x = global_T.matrix('x') # the data is presented as rasterized images # initialize storage for the persistent chain (state = hidden # layer of chain) self.persistent_chain = global_theano.shared( np.zeros( (self.batch_size, self.n_hidden), dtype=global_theano.config.floatX), borrow=True) # construct the RBM class self.rbm = RBM( global_theano, global_T, input=self.x, n_visible=self.n_visible, n_hidden=self.n_hidden, np_rng=self.rng, theano_rng=self.theano_rng) # get the cost and the gradient corresponding to one step of CD-15 self.cost, self.updates = self.rbm.get_cost_updates( lr=self.learning_rate, persistent=self.persistent_chain, k=15) # it is ok for a theano function to have no output # the purpose of train_rbm is solely to update the RBM parameters self.train_rbm = global_theano.function( [self.index], self.cost, updates=self.updates, givens={self.x: train_x[self.index * self.batch_size: (self.index + 1) * self.batch_size]}, name='train_rbm')
mit
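The extractor in the record above follows the scikit-learn estimator protocol (fit returns self, transform returns the resampled features). A hedged usage sketch is given below; it assumes the Python 2 / Theano-era dependencies are installed, and the import path is inferred from the file location.

import numpy as np
from pyvotune.feature_extractors.theano_rbm_extractor import TheanoRBMFeatureExtractor

# Small random dataset standing in for real feature matrices
# (e.g. flattened image patches scaled to [0, 1]).
X = np.random.rand(200, 64)

rbm = TheanoRBMFeatureExtractor(learning_rate=0.1,
                                training_epochs=5,
                                batch_size=20,
                                n_hidden=50,
                                n_resamples=5)

# transform returns the mean visible field after Gibbs resampling,
# so the output has the same width as the input features.
features = rbm.fit(X).transform(X)
print(features.shape)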
rosmo/ansible
lib/ansible/modules/cloud/google/gcp_bigquery_table.py
3
54111
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_bigquery_table description: - A Table that belongs to a Dataset . short_description: Creates a GCP Table version_added: 2.8 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: - present - absent default: present table_reference: description: - Reference describing the ID of this table. required: false suboptions: dataset_id: description: - The ID of the dataset containing this table. required: false project_id: description: - The ID of the project containing this table. required: false table_id: description: - The ID of the the table. required: false description: description: - A user-friendly description of the dataset. required: false friendly_name: description: - A descriptive name for this table. required: false labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . required: false name: description: - Name of the table. required: false view: description: - The view definition. required: false suboptions: use_legacy_sql: description: - Specifies whether to use BigQuery's legacy SQL for this view . required: false type: bool user_defined_function_resources: description: - Describes user-defined function resources used in the query. required: false suboptions: inline_code: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. required: false resource_uri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). required: false time_partitioning: description: - If specified, configures time-based partitioning for this table. required: false suboptions: expiration_ms: description: - Number of milliseconds for which to keep the storage for a partition. required: false type: description: - The only type supported is DAY, which will generate one partition per day. - 'Some valid choices include: "DAY"' required: false schema: description: - Describes the schema of this table. required: false suboptions: fields: description: - Describes the fields in a table. required: false suboptions: description: description: - The field description. The maximum length is 1,024 characters. 
required: false fields: description: - Describes the nested schema fields if the type property is set to RECORD. required: false mode: description: - The field mode. - 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' required: false name: description: - The field name. required: false type: description: - The field data type. - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' required: false encryption_configuration: description: - Custom encryption configuration. required: false suboptions: kms_key_name: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. required: false expiration_time: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. required: false external_data_configuration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. required: false suboptions: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. required: false type: bool compression: description: - The compression type of the data source. - 'Some valid choices include: "GZIP", "NONE"' required: false ignore_unknown_values: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . required: false type: bool max_bad_records: description: - The maximum number of bad records that BigQuery can ignore when reading data . required: false default: '0' source_format: description: - The data format. - 'Some valid choices include: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "BIGTABLE"' required: false source_uris: description: - The fully-qualified URIs that point to your data in Google Cloud. - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard character is not allowed.' required: false schema: description: - The schema for the data. Schema is required for CSV and JSON formats. required: false suboptions: fields: description: - Describes the fields in a table. required: false suboptions: description: description: - The field description. required: false fields: description: - Describes the nested schema fields if the type property is set to RECORD . required: false mode: description: - Field mode. - 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' required: false name: description: - Field name. required: false type: description: - Field data type. - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' required: false google_sheets_options: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. 
required: false suboptions: skip_leading_rows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. required: false default: '0' csv_options: description: - Additional properties to set if sourceFormat is set to CSV. required: false suboptions: allow_jagged_rows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . required: false type: bool allow_quoted_newlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . required: false type: bool encoding: description: - The character encoding of the data. - 'Some valid choices include: "UTF-8", "ISO-8859-1"' required: false field_delimiter: description: - The separator for fields in a CSV file. required: false quote: description: - The value that is used to quote data sections in a CSV file. required: false skip_leading_rows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. required: false default: '0' bigtable_options: description: - Additional options if sourceFormat is set to BIGTABLE. required: false suboptions: ignore_unspecified_column_families: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . required: false type: bool read_rowkey_as_string: description: - If field is true, then the rowkey column families will be read and converted to string. required: false type: bool column_families: description: - List of column families to expose in the table schema along with their types. required: false suboptions: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. required: false suboptions: encoding: description: - The encoding of the values when the type is not STRING. - 'Some valid choices include: "TEXT", "BINARY"' required: false field_name: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. required: false only_read_latest: description: - If this is set, only the latest version of value in this column are exposed . required: false type: bool qualifier_string: description: - Qualifier of the column. required: true type: description: - The type to convert the value in cells of this column. - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN"' required: false encoding: description: - The encoding of the values when the type is not STRING. - 'Some valid choices include: "TEXT", "BINARY"' required: false family_id: description: - Identifier of the column family. required: false only_read_latest: description: - If this is set only the latest version of value are exposed for all columns in this column family . required: false type: bool type: description: - The type to convert the value in cells of this column family. - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN"' required: false dataset: description: - Name of the dataset. 
required: false extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: create a dataset gcp_bigquery_dataset: name: example_dataset dataset_reference: dataset_id: example_dataset project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: dataset - name: create a table gcp_bigquery_table: name: example_table dataset: example_dataset table_reference: dataset_id: example_dataset project_id: test_project table_id: example_table project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' tableReference: description: - Reference describing the ID of this table. returned: success type: complex contains: datasetId: description: - The ID of the dataset containing this table. returned: success type: str projectId: description: - The ID of the project containing this table. returned: success type: str tableId: description: - The ID of the the table. returned: success type: str creationTime: description: - The time when this dataset was created, in milliseconds since the epoch. returned: success type: int description: description: - A user-friendly description of the dataset. returned: success type: str friendlyName: description: - A descriptive name for this table. returned: success type: str id: description: - An opaque ID uniquely identifying the table. returned: success type: str labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . returned: success type: dict lastModifiedTime: description: - The time when this table was last modified, in milliseconds since the epoch. returned: success type: int location: description: - The geographic location where the table resides. This value is inherited from the dataset. returned: success type: str name: description: - Name of the table. returned: success type: str numBytes: description: - The size of this table in bytes, excluding any data in the streaming buffer. returned: success type: int numLongTermBytes: description: - The number of bytes in the table that are considered "long-term storage". returned: success type: int numRows: description: - The number of rows of data in this table, excluding any data in the streaming buffer. returned: success type: int type: description: - Describes the table type. returned: success type: str view: description: - The view definition. returned: success type: complex contains: useLegacySql: description: - Specifies whether to use BigQuery's legacy SQL for this view . returned: success type: bool userDefinedFunctionResources: description: - Describes user-defined function resources used in the query. returned: success type: complex contains: inlineCode: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. returned: success type: str resourceUri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). returned: success type: str timePartitioning: description: - If specified, configures time-based partitioning for this table. returned: success type: complex contains: expirationMs: description: - Number of milliseconds for which to keep the storage for a partition. returned: success type: int type: description: - The only type supported is DAY, which will generate one partition per day. 
returned: success type: str streamingBuffer: description: - Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer. returned: success type: complex contains: estimatedBytes: description: - A lower-bound estimate of the number of bytes currently in the streaming buffer. returned: success type: int estimatedRows: description: - A lower-bound estimate of the number of rows currently in the streaming buffer. returned: success type: int oldestEntryTime: description: - Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. returned: success type: int schema: description: - Describes the schema of this table. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. The maximum length is 1,024 characters. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD. returned: success type: list mode: description: - The field mode. returned: success type: str name: description: - The field name. returned: success type: str type: description: - The field data type. returned: success type: str encryptionConfiguration: description: - Custom encryption configuration. returned: success type: complex contains: kmsKeyName: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. returned: success type: str expirationTime: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. returned: success type: int externalDataConfiguration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. returned: success type: complex contains: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. returned: success type: bool compression: description: - The compression type of the data source. returned: success type: str ignoreUnknownValues: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . returned: success type: bool maxBadRecords: description: - The maximum number of bad records that BigQuery can ignore when reading data . returned: success type: int sourceFormat: description: - The data format. returned: success type: str sourceUris: description: - The fully-qualified URIs that point to your data in Google Cloud. - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard character is not allowed.' returned: success type: list schema: description: - The schema for the data. 
Schema is required for CSV and JSON formats. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD . returned: success type: list mode: description: - Field mode. returned: success type: str name: description: - Field name. returned: success type: str type: description: - Field data type. returned: success type: str googleSheetsOptions: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. returned: success type: complex contains: skipLeadingRows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. returned: success type: int csvOptions: description: - Additional properties to set if sourceFormat is set to CSV. returned: success type: complex contains: allowJaggedRows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . returned: success type: bool allowQuotedNewlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . returned: success type: bool encoding: description: - The character encoding of the data. returned: success type: str fieldDelimiter: description: - The separator for fields in a CSV file. returned: success type: str quote: description: - The value that is used to quote data sections in a CSV file. returned: success type: str skipLeadingRows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. returned: success type: int bigtableOptions: description: - Additional options if sourceFormat is set to BIGTABLE. returned: success type: complex contains: ignoreUnspecifiedColumnFamilies: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . returned: success type: bool readRowkeyAsString: description: - If field is true, then the rowkey column families will be read and converted to string. returned: success type: bool columnFamilies: description: - List of column families to expose in the table schema along with their types. returned: success type: complex contains: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. returned: success type: complex contains: encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str fieldName: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. returned: success type: str onlyReadLatest: description: - If this is set, only the latest version of value in this column are exposed . returned: success type: bool qualifierString: description: - Qualifier of the column. returned: success type: str type: description: - The type to convert the value in cells of this column. returned: success type: str encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str familyId: description: - Identifier of the column family. 
returned: success type: str onlyReadLatest: description: - If this is set only the latest version of value are exposed for all columns in this column family . returned: success type: bool type: description: - The type to convert the value in cells of this column family. returned: success type: str dataset: description: - Name of the dataset. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), table_reference=dict(type='dict', options=dict(dataset_id=dict(type='str'), project_id=dict(type='str'), table_id=dict(type='str'))), description=dict(type='str'), friendly_name=dict(type='str'), labels=dict(type='dict'), name=dict(type='str'), view=dict( type='dict', options=dict( use_legacy_sql=dict(type='bool'), user_defined_function_resources=dict( type='list', elements='dict', options=dict(inline_code=dict(type='str'), resource_uri=dict(type='str')) ), ), ), time_partitioning=dict(type='dict', options=dict(expiration_ms=dict(type='int'), type=dict(type='str'))), schema=dict( type='dict', options=dict( fields=dict( type='list', elements='dict', options=dict( description=dict(type='str'), fields=dict(type='list', elements='str'), mode=dict(type='str'), name=dict(type='str'), type=dict(type='str'), ), ) ), ), encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))), expiration_time=dict(type='int'), external_data_configuration=dict( type='dict', options=dict( autodetect=dict(type='bool'), compression=dict(type='str'), ignore_unknown_values=dict(type='bool'), max_bad_records=dict(default=0, type='int'), source_format=dict(type='str'), source_uris=dict(type='list', elements='str'), schema=dict( type='dict', options=dict( fields=dict( type='list', elements='dict', options=dict( description=dict(type='str'), fields=dict(type='list', elements='str'), mode=dict(type='str'), name=dict(type='str'), type=dict(type='str'), ), ) ), ), google_sheets_options=dict(type='dict', options=dict(skip_leading_rows=dict(default=0, type='int'))), csv_options=dict( type='dict', options=dict( allow_jagged_rows=dict(type='bool'), allow_quoted_newlines=dict(type='bool'), encoding=dict(type='str'), field_delimiter=dict(type='str'), quote=dict(type='str'), skip_leading_rows=dict(default=0, type='int'), ), ), bigtable_options=dict( type='dict', options=dict( ignore_unspecified_column_families=dict(type='bool'), read_rowkey_as_string=dict(type='bool'), column_families=dict( type='list', elements='dict', options=dict( columns=dict( type='list', elements='dict', options=dict( encoding=dict(type='str'), field_name=dict(type='str'), only_read_latest=dict(type='bool'), qualifier_string=dict(required=True, type='str'), type=dict(type='str'), ), ), encoding=dict(type='str'), family_id=dict(type='str'), only_read_latest=dict(type='bool'), type=dict(type='str'), ), ), ), ), ), ), dataset=dict(type='str'), ) ) if not module.params['scopes']: module.params['scopes'] = 
['https://www.googleapis.com/auth/bigquery'] state = module.params['state'] kind = 'bigquery#table' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): update(module, self_link(module), kind) fetch = fetch_resource(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.post(link, resource_to_request(module)), kind) def update(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.put(link, resource_to_request(module)), kind) def delete(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.delete(link), kind) def resource_to_request(module): request = { u'kind': 'bigquery#table', u'tableReference': TableTablereference(module.params.get('table_reference', {}), module).to_request(), u'description': module.params.get('description'), u'friendlyName': module.params.get('friendly_name'), u'labels': module.params.get('labels'), u'name': module.params.get('name'), u'view': TableView(module.params.get('view', {}), module).to_request(), u'timePartitioning': TableTimepartitioning(module.params.get('time_partitioning', {}), module).to_request(), u'schema': TableSchema(module.params.get('schema', {}), module).to_request(), u'encryptionConfiguration': TableEncryptionconfiguration(module.params.get('encryption_configuration', {}), module).to_request(), u'expirationTime': module.params.get('expiration_time'), u'externalDataConfiguration': TableExternaldataconfiguration(module.params.get('external_data_configuration', {}), module).to_request(), } return_vals = {} for k, v in request.items(): if v or v is False: return_vals[k] = v return return_vals def fetch_resource(module, link, kind, allow_not_found=True): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.get(link), kind, allow_not_found) def self_link(module): return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables/{name}".format(**module.params) def collection(module): return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params) def return_if_object(module, response, kind, allow_not_found=False): # If not found, return nothing. if allow_not_found and response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError): module.fail_json(msg="Invalid JSON response with error: %s" % response.text) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result def is_different(module, response): request = resource_to_request(module) response = response_to_hash(module, response) # Remove all output-only from response. response_vals = {} for k, v in response.items(): if k in request: response_vals[k] = v request_vals = {} for k, v in request.items(): if k in response: request_vals[k] = v return GcpRequest(request_vals) != GcpRequest(response_vals) # Remove unnecessary properties from the response. 
# This is for doing comparisons with Ansible's current parameters. def response_to_hash(module, response): return { u'tableReference': TableTablereference(response.get(u'tableReference', {}), module).from_response(), u'creationTime': response.get(u'creationTime'), u'description': response.get(u'description'), u'friendlyName': response.get(u'friendlyName'), u'id': response.get(u'id'), u'labels': response.get(u'labels'), u'lastModifiedTime': response.get(u'lastModifiedTime'), u'location': response.get(u'location'), u'name': response.get(u'name'), u'numBytes': response.get(u'numBytes'), u'numLongTermBytes': response.get(u'numLongTermBytes'), u'numRows': response.get(u'numRows'), u'type': response.get(u'type'), u'view': TableView(response.get(u'view', {}), module).from_response(), u'timePartitioning': TableTimepartitioning(response.get(u'timePartitioning', {}), module).from_response(), u'streamingBuffer': TableStreamingbuffer(response.get(u'streamingBuffer', {}), module).from_response(), u'schema': TableSchema(response.get(u'schema', {}), module).from_response(), u'encryptionConfiguration': TableEncryptionconfiguration(response.get(u'encryptionConfiguration', {}), module).from_response(), u'expirationTime': response.get(u'expirationTime'), u'externalDataConfiguration': TableExternaldataconfiguration(response.get(u'externalDataConfiguration', {}), module).from_response(), } class TableTablereference(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( {u'datasetId': self.request.get('dataset_id'), u'projectId': self.request.get('project_id'), u'tableId': self.request.get('table_id')} ) def from_response(self): return remove_nones_from_dict( {u'datasetId': self.request.get(u'datasetId'), u'projectId': self.request.get(u'projectId'), u'tableId': self.request.get(u'tableId')} ) class TableView(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'useLegacySql': self.request.get('use_legacy_sql'), u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray( self.request.get('user_defined_function_resources', []), self.module ).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'useLegacySql': self.request.get(u'useLegacySql'), u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray( self.request.get(u'userDefinedFunctionResources', []), self.module ).from_response(), } ) class TableUserdefinedfunctionresourcesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({u'inlineCode': item.get('inline_code'), u'resourceUri': item.get('resource_uri')}) def _response_from_item(self, item): return remove_nones_from_dict({u'inlineCode': item.get(u'inlineCode'), u'resourceUri': item.get(u'resourceUri')}) class TableTimepartitioning(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'expirationMs': 
self.request.get('expiration_ms'), u'type': self.request.get('type')}) def from_response(self): return remove_nones_from_dict({u'expirationMs': self.request.get(u'expirationMs'), u'type': self.request.get(u'type')}) class TableStreamingbuffer(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({}) def from_response(self): return remove_nones_from_dict({}) class TableSchema(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()}) def from_response(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()}) class TableFieldsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'description': item.get('description'), u'fields': item.get('fields'), u'mode': item.get('mode'), u'name': item.get('name'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'description': item.get(u'description'), u'fields': item.get(u'fields'), u'mode': item.get(u'mode'), u'name': item.get(u'name'), u'type': item.get(u'type'), } ) class TableEncryptionconfiguration(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')}) def from_response(self): return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')}) class TableExternaldataconfiguration(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'autodetect': self.request.get('autodetect'), u'compression': self.request.get('compression'), u'ignoreUnknownValues': self.request.get('ignore_unknown_values'), u'maxBadRecords': self.request.get('max_bad_records'), u'sourceFormat': self.request.get('source_format'), u'sourceUris': self.request.get('source_uris'), u'schema': TableSchema(self.request.get('schema', {}), self.module).to_request(), u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get('google_sheets_options', {}), self.module).to_request(), u'csvOptions': TableCsvoptions(self.request.get('csv_options', {}), self.module).to_request(), u'bigtableOptions': TableBigtableoptions(self.request.get('bigtable_options', {}), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'autodetect': self.request.get(u'autodetect'), u'compression': self.request.get(u'compression'), u'ignoreUnknownValues': self.request.get(u'ignoreUnknownValues'), u'maxBadRecords': self.request.get(u'maxBadRecords'), u'sourceFormat': self.request.get(u'sourceFormat'), u'sourceUris': self.request.get(u'sourceUris'), u'schema': TableSchema(self.request.get(u'schema', {}), self.module).from_response(), 
u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get(u'googleSheetsOptions', {}), self.module).from_response(), u'csvOptions': TableCsvoptions(self.request.get(u'csvOptions', {}), self.module).from_response(), u'bigtableOptions': TableBigtableoptions(self.request.get(u'bigtableOptions', {}), self.module).from_response(), } ) class TableSchema(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()}) def from_response(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()}) class TableFieldsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'description': item.get('description'), u'fields': item.get('fields'), u'mode': item.get('mode'), u'name': item.get('name'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'description': item.get(u'description'), u'fields': item.get(u'fields'), u'mode': item.get(u'mode'), u'name': item.get(u'name'), u'type': item.get(u'type'), } ) class TableGooglesheetsoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'skipLeadingRows': self.request.get('skip_leading_rows')}) def from_response(self): return remove_nones_from_dict({u'skipLeadingRows': self.request.get(u'skipLeadingRows')}) class TableCsvoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'allowJaggedRows': self.request.get('allow_jagged_rows'), u'allowQuotedNewlines': self.request.get('allow_quoted_newlines'), u'encoding': self.request.get('encoding'), u'fieldDelimiter': self.request.get('field_delimiter'), u'quote': self.request.get('quote'), u'skipLeadingRows': self.request.get('skip_leading_rows'), } ) def from_response(self): return remove_nones_from_dict( { u'allowJaggedRows': self.request.get(u'allowJaggedRows'), u'allowQuotedNewlines': self.request.get(u'allowQuotedNewlines'), u'encoding': self.request.get(u'encoding'), u'fieldDelimiter': self.request.get(u'fieldDelimiter'), u'quote': self.request.get(u'quote'), u'skipLeadingRows': self.request.get(u'skipLeadingRows'), } ) class TableBigtableoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'ignoreUnspecifiedColumnFamilies': self.request.get('ignore_unspecified_column_families'), u'readRowkeyAsString': self.request.get('read_rowkey_as_string'), u'columnFamilies': TableColumnfamiliesArray(self.request.get('column_families', []), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'ignoreUnspecifiedColumnFamilies': self.request.get(u'ignoreUnspecifiedColumnFamilies'), 
u'readRowkeyAsString': self.request.get(u'readRowkeyAsString'), u'columnFamilies': TableColumnfamiliesArray(self.request.get(u'columnFamilies', []), self.module).from_response(), } ) class TableColumnfamiliesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'columns': TableColumnsArray(item.get('columns', []), self.module).to_request(), u'encoding': item.get('encoding'), u'familyId': item.get('family_id'), u'onlyReadLatest': item.get('only_read_latest'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'columns': TableColumnsArray(item.get(u'columns', []), self.module).from_response(), u'encoding': item.get(u'encoding'), u'familyId': item.get(u'familyId'), u'onlyReadLatest': item.get(u'onlyReadLatest'), u'type': item.get(u'type'), } ) class TableColumnsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'encoding': item.get('encoding'), u'fieldName': item.get('field_name'), u'onlyReadLatest': item.get('only_read_latest'), u'qualifierString': item.get('qualifier_string'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'encoding': item.get(u'encoding'), u'fieldName': item.get(u'fieldName'), u'onlyReadLatest': item.get(u'onlyReadLatest'), u'qualifierString': item.get(u'qualifierString'), u'type': item.get(u'type'), } ) if __name__ == '__main__': main()
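# Illustrative sketch (not part of the module): the create/update decision in main()
# hinges on comparing the request built from Ansible's snake_case parameters against
# the camelCase response returned by the BigQuery API, restricted to the keys both
# sides share. The helper below mimics that round-trip with plain dicts; the real
# module delegates the comparison to GcpRequest from ansible.module_utils.gcp_utils.
def _would_update(request, response):
    shared_req = {k: v for k, v in request.items() if k in response}
    shared_resp = {k: v for k, v in response.items() if k in request}
    return shared_req != shared_resp

# Example: only 'description' differs, so an update (PUT to self_link) would be issued;
# output-only fields such as 'id' are ignored because they have no request counterpart.
assert _would_update(
    {u'friendlyName': 'events', u'description': 'raw events'},
    {u'friendlyName': 'events', u'description': 'old text', u'id': 'p:d.events'},
)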
gpl-3.0
antsant/namebench
nb_third_party/dns/rdataset.py
215
11527
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" import random import StringIO import struct import dns.exception import dns.rdatatype import dns.rdataclass import dns.rdata import dns.set # define SimpleSet here for backwards compatibility SimpleSet = dns.set.Set class DifferingCovers(dns.exception.DNSException): """Raised if an attempt is made to add a SIG/RRSIG whose covered type is not the same as that of the other rdatas in the rdataset.""" pass class IncompatibleTypes(dns.exception.DNSException): """Raised if an attempt is made to add rdata of an incompatible type.""" pass class Rdataset(dns.set.Set): """A DNS rdataset. @ivar rdclass: The class of the rdataset @type rdclass: int @ivar rdtype: The type of the rdataset @type rdtype: int @ivar covers: The covered type. Usually this value is dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or dns.rdatatype.RRSIG, then the covers value will be the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much easier to work with than if RRSIGs covering different rdata types were aggregated into a single RRSIG rdataset. @type covers: int @ivar ttl: The DNS TTL (Time To Live) value @type ttl: int """ __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE): """Create a new rdataset of the specified class and type. @see: the description of the class instance variables for the meaning of I{rdclass} and I{rdtype}""" super(Rdataset, self).__init__() self.rdclass = rdclass self.rdtype = rdtype self.covers = covers self.ttl = 0 def _clone(self): obj = super(Rdataset, self)._clone() obj.rdclass = self.rdclass obj.rdtype = self.rdtype obj.covers = self.covers obj.ttl = self.ttl return obj def update_ttl(self, ttl): """Set the TTL of the rdataset to be the lesser of the set's current TTL or the specified TTL. If the set contains no rdatas, set the TTL to the specified TTL. @param ttl: The TTL @type ttl: int""" if len(self) == 0: self.ttl = ttl elif ttl < self.ttl: self.ttl = ttl def add(self, rd, ttl=None): """Add the specified rdata to the rdataset. If the optional I{ttl} parameter is supplied, then self.update_ttl(ttl) will be called prior to adding the rdata. @param rd: The rdata @type rd: dns.rdata.Rdata object @param ttl: The TTL @type ttl: int""" # # If we're adding a signature, do some special handling to # check that the signature covers the same type as the # other rdatas in this rdataset. If this is the first rdata # in the set, initialize the covers field. 
# if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: raise IncompatibleTypes if not ttl is None: self.update_ttl(ttl) if self.rdtype == dns.rdatatype.RRSIG or \ self.rdtype == dns.rdatatype.SIG: covers = rd.covers() if len(self) == 0 and self.covers == dns.rdatatype.NONE: self.covers = covers elif self.covers != covers: raise DifferingCovers if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: self.clear() super(Rdataset, self).add(rd) def union_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).union_update(other) def intersection_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).intersection_update(other) def update(self, other): """Add all rdatas in other to self. @param other: The rdataset from which to update @type other: dns.rdataset.Rdataset object""" self.update_ttl(other.ttl) super(Rdataset, self).update(other) def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' def __str__(self): return self.to_text() def __eq__(self, other): """Two rdatasets are equal if they have the same class, type, and covers, and contain the same rdata. @rtype: bool""" if not isinstance(other, Rdataset): return False if self.rdclass != other.rdclass or \ self.rdtype != other.rdtype or \ self.covers != other.covers: return False return super(Rdataset, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): """Convert the rdataset into DNS master file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names are emitted. Any additional keyword arguments are passed on to the rdata to_text() method. @param name: If name is not None, emit a RRs with I{name} as the owner name. @type name: dns.name.Name object @param origin: The origin for relative names, or None. @type origin: dns.name.Name object @param relativize: True if names should names be relativized @type relativize: bool""" if not name is None: name = name.choose_relativity(origin, relativize) ntext = str(name) pad = ' ' else: ntext = '' pad = '' s = StringIO.StringIO() if not override_rdclass is None: rdclass = override_rdclass else: rdclass = self.rdclass if len(self) == 0: # # Empty rdatasets are used for the question section, and in # some dynamic updates, so we don't need to print out the TTL # (which is meaningless anyway). # print >> s, '%s%s%s %s' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype)) else: for rd in self: print >> s, '%s%s%d %s %s %s' % \ (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw)) # # We strip off the final \n for the caller's convenience in printing # return s.getvalue()[:-1] def to_wire(self, name, file, compress=None, origin=None, override_rdclass=None, want_shuffle=True): """Convert the rdataset to wire format. @param name: The owner name of the RRset that will be emitted @type name: dns.name.Name object @param file: The file to which the wire format data will be appended @type file: file @param compress: The compression table to use; the default is None. @type compress: dict @param origin: The origin to be appended to any relative names when they are emitted. The default is None. 
@returns: the number of records emitted @rtype: int """ if not override_rdclass is None: rdclass = override_rdclass want_shuffle = False else: rdclass = self.rdclass file.seek(0, 2) if len(self) == 0: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) file.write(stuff) return 1 else: if want_shuffle: l = list(self) random.shuffle(l) else: l = self for rd in l: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, self.ttl, 0) file.write(stuff) start = file.tell() rd.to_wire(file, compress, origin) end = file.tell() assert end - start < 65536 file.seek(start - 2) stuff = struct.pack("!H", end - start) file.write(stuff) file.seek(0, 2) return len(self) def match(self, rdclass, rdtype, covers): """Returns True if this rdataset matches the specified class, type, and covers""" if self.rdclass == rdclass and \ self.rdtype == rdtype and \ self.covers == covers: return True return False def from_text_list(rdclass, rdtype, ttl, text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified list of rdatas in text format. @rtype: dns.rdataset.Rdataset object """ if isinstance(rdclass, str): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) r = Rdataset(rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(rdclass, rdtype, ttl, *text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified rdatas in text format. @rtype: dns.rdataset.Rdataset object """ return from_text_list(rdclass, rdtype, ttl, text_rdatas) def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. @rtype: dns.rdataset.Rdataset object """ if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) first_time = False r.add(rd) return r def from_rdata(ttl, *rdatas): """Create an rdataset with the specified TTL, and with the specified rdata objects. @rtype: dns.rdataset.Rdataset object """ return from_rdata_list(ttl, rdatas)
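# Usage sketch for the module above (dnspython 1.x API; relies only on the
# module-level from_text() helper defined in this file and on dns.rdata.from_text()).
import dns.rdata
import dns.rdataset

# Build an IN/A rdataset with a 300 second TTL from text rdatas.
rrs = dns.rdataset.from_text('IN', 'A', 300, '10.0.0.1', '192.168.1.1')
print(rrs.to_text())

# add() with a lower TTL calls update_ttl(), so the set's TTL drops to 60;
# A is not a singleton type, so all three rdatas are kept.
rd = dns.rdata.from_text(rrs.rdclass, rrs.rdtype, '172.16.0.1')
rrs.add(rd, ttl=60)
assert rrs.ttl == 60 and len(rrs) == 3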
apache-2.0
azavea/raster-foundry
app-hitl/hitl/src/hitl/rv/train.py
2
2641
from rastervision.core.data import (ClassConfig, DatasetConfig, Scene) from rastervision.pytorch_learner import ( SemanticSegmentationGeoDataConfig, SemanticSegmentationLearner, SemanticSegmentationLearnerConfig, SemanticSegmentationModelConfig, SolverConfig, SemanticSegmentationRandomWindowGeoDataset, ExternalModuleConfig) def train(scene: Scene, class_config: ClassConfig, output_dir: str, **kwargs) -> SemanticSegmentationLearner: chip_sz = kwargs.get('chip_sz', 256) img_sz = kwargs.get('img_sz', 256) num_classes = len(class_config) img_channels = len(scene.raster_source.channel_order) init_weights = kwargs.get('init_weights', None) data_cfg = SemanticSegmentationGeoDataConfig( num_workers=kwargs.get('num_workers', 4), img_channels=img_channels, img_sz=img_sz, scene_dataset=DatasetConfig( class_config=class_config, train_scenes=[], validation_scenes=[]), window_opts={}) if kwargs.get('external_model'): entrypoint_kwargs = dict( name='resnet18', fpn_type='panoptic', num_classes=num_classes, fpn_channels=128, in_channels=img_channels, out_size=(img_sz, img_sz), pretrained=(init_weights is None)) entrypoint_kwargs.update(kwargs.get('external_model_kwargs', {})) model_cfg = SemanticSegmentationModelConfig( external_def=ExternalModuleConfig( github_repo='AdeelH/pytorch-fpn:0.3', name='fpn', entrypoint='make_fpn_resnet', entrypoint_kwargs=entrypoint_kwargs), init_weights=init_weights) else: model_cfg = SemanticSegmentationModelConfig( pretrained=(init_weights is None), init_weights=init_weights) learner_cfg = SemanticSegmentationLearnerConfig( output_uri=output_dir, data=data_cfg, model=model_cfg, solver=SolverConfig( batch_sz=kwargs.get('batch_sz', 16), num_epochs=kwargs.get('num_epochs', 1), lr=kwargs.get('lr', 3e-4))) train_ds = SemanticSegmentationRandomWindowGeoDataset( scene, out_size=img_sz, size_lims=(chip_sz, chip_sz + 1), max_windows=kwargs.get('num_chips', 100)) val_ds = SemanticSegmentationRandomWindowGeoDataset( scene, out_size=img_sz, size_lims=(img_sz, img_sz + 1), max_windows=10) learner = SemanticSegmentationLearner( cfg=learner_cfg, train_ds=train_ds, valid_ds=val_ds, test_ds=val_ds) learner.train() return learner
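# Usage sketch (hypothetical caller): `scene` and `class_config` are assumed to be
# built elsewhere with Raster Vision's Scene/ClassConfig APIs; every keyword below
# maps to a kwargs.get(...) lookup inside train() above, and the output path is
# purely illustrative.
learner = train(
    scene,
    class_config,
    output_dir='/opt/data/hitl/model',   # hypothetical path
    chip_sz=256,
    img_sz=256,
    num_chips=200,
    batch_sz=8,
    num_epochs=3,
    lr=1e-4,
    external_model=True,  # pull the panoptic FPN from AdeelH/pytorch-fpn:0.3
)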
apache-2.0
imaculate/scikit-learn
sklearn/cluster/dbscan_.py
6
12319
# -*- coding: utf-8 -*- """ DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <robertlayton@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Lars Buitinck # # License: BSD 3 clause import numpy as np from scipy import sparse from ..base import BaseEstimator, ClusterMixin from ..metrics import pairwise_distances from ..utils import check_array, check_consistent_length from ..utils.fixes import astype from ..neighbors import NearestNeighbors from ._dbscan_inner import dbscan_inner def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- core_samples : array [n_core_samples] Indices of core samples. labels : array [n_samples] Cluster labels for each point. Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". 
In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ if not eps > 0.0: raise ValueError("eps must be positive.") X = check_array(X, accept_sparse='csr') if sample_weight is not None: sample_weight = np.asarray(sample_weight) check_consistent_length(X, sample_weight) # Calculate neighborhood for all samples. This leaves the original point # in, which needs to be considered later (i.e. point i is in the # neighborhood of point i. While True, its useless information) if metric == 'precomputed' and sparse.issparse(X): neighborhoods = np.empty(X.shape[0], dtype=object) X.sum_duplicates() # XXX: modifies X's internals in-place X_mask = X.data <= eps masked_indices = astype(X.indices, np.intp, copy=False)[X_mask] masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1] # insert the diagonal: a point is its own neighbor, but 0 distance # means absence from sparse matrix data masked_indices = np.insert(masked_indices, masked_indptr, np.arange(X.shape[0])) masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0]) # split into rows neighborhoods[:] = np.split(masked_indices, masked_indptr) else: neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, n_jobs=n_jobs) neighbors_model.fit(X) # This has worst case O(n^2) memory complexity neighborhoods = neighbors_model.radius_neighbors(X, eps, return_distance=False) if sample_weight is None: n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) else: n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]) # Initially, all samples are noise. labels = -np.ones(X.shape[0], dtype=np.intp) # A list of all core samples found. core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8) dbscan_inner(core_samples, neighborhoods, labels) return np.where(core_samples)[0], labels class DBSCAN(BaseEstimator, ClusterMixin): """Perform DBSCAN clustering from vector array or distance matrix. DBSCAN - Density-Based Spatial Clustering of Applications with Noise. Finds core samples of high density and expands clusters from them. Good for data which contains clusters of similar density. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.calculate_distance for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. 
The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- core_sample_indices_ : array, shape = [n_core_samples] Indices of core samples. components_ : array, shape = [n_core_samples, n_features] Copy of each core sample found by training. labels_ : array, shape = [n_samples] Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ def __init__(self, eps=0.5, min_samples=5, metric='euclidean', algorithm='auto', leaf_size=30, p=None, n_jobs=1): self.eps = eps self.min_samples = min_samples self.metric = metric self.algorithm = algorithm self.leaf_size = leaf_size self.p = p self.n_jobs = n_jobs def fit(self, X, y=None, sample_weight=None): """Perform DBSCAN clustering from features or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. """ X = check_array(X, accept_sparse='csr') clust = dbscan(X, sample_weight=sample_weight, **self.get_params()) self.core_sample_indices_, self.labels_ = clust if len(self.core_sample_indices_): # fix for scipy sparse indexing issue self.components_ = X[self.core_sample_indices_].copy() else: # no core samples self.components_ = np.empty((0, X.shape[1])) return self def fit_predict(self, X, y=None, sample_weight=None): """Performs clustering on X and returns cluster labels. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ self.fit(X, sample_weight=sample_weight) return self.labels_
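# Usage sketch for the estimator defined above (DBSCAN is the class in this module):
# the first five points fall into two dense groups within eps=3 of each other, while
# the isolated point is labelled -1 (noise).
import numpy as np

X = np.array([[1., 2.], [2., 2.], [2., 3.],
              [8., 7.], [8., 8.], [25., 80.]])
db = DBSCAN(eps=3, min_samples=2).fit(X)
print(db.labels_)                # [ 0  0  0  1  1 -1]
print(db.core_sample_indices_)   # [0 1 2 3 4]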
bsd-3-clause
hendrycks/robustness
ImageNet-C/create_c/make_imagenet_c.py
1
20564
# -*- coding: utf-8 -*- import os from PIL import Image import os.path import time import torch import torchvision.datasets as dset import torchvision.transforms as trn import torch.utils.data as data import numpy as np from PIL import Image # /////////////// Data Loader /////////////// IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm'] def is_image_file(filename): """Checks if a file is an image. Args: filename (string): path to a file Returns: bool: True if the filename ends with a known image extension """ filename_lower = filename.lower() return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS) def find_classes(dir): classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx def make_dataset(dir, class_to_idx): images = [] dir = os.path.expanduser(dir) for target in sorted(os.listdir(dir)): d = os.path.join(dir, target) if not os.path.isdir(d): continue for root, _, fnames in sorted(os.walk(d)): for fname in sorted(fnames): if is_image_file(fname): path = os.path.join(root, fname) item = (path, class_to_idx[target]) images.append(item) return images def pil_loader(path): # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB') def accimage_loader(path): import accimage try: return accimage.Image(path) except IOError: # Potentially a decoding problem, fall back to PIL.Image return pil_loader(path) def default_loader(path): from torchvision import get_image_backend if get_image_backend() == 'accimage': return accimage_loader(path) else: return pil_loader(path) class DistortImageFolder(data.Dataset): def __init__(self, root, method, severity, transform=None, target_transform=None, loader=default_loader): classes, class_to_idx = find_classes(root) imgs = make_dataset(root, class_to_idx) if len(imgs) == 0: raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n" "Supported image extensions are: " + ",".join( IMG_EXTENSIONS))) self.root = root self.method = method self.severity = severity self.imgs = imgs self.classes = classes self.class_to_idx = class_to_idx self.idx_to_class = {v: k for k, v in class_to_idx.items()} self.transform = transform self.target_transform = target_transform self.loader = loader def __getitem__(self, index): path, target = self.imgs[index] img = self.loader(path) if self.transform is not None: img = self.transform(img) img = self.method(img, self.severity) if self.target_transform is not None: target = self.target_transform(target) save_path = '/share/data/vision-greg/DistortedImageNet/JPEG/' + self.method.__name__ + \ '/' + str(self.severity) + '/' + self.idx_to_class[target] if not os.path.exists(save_path): os.makedirs(save_path) save_path += path[path.rindex('/'):] Image.fromarray(np.uint8(img)).save(save_path, quality=85, optimize=True) return 0 # we do not care about returning the data def __len__(self): return len(self.imgs) # /////////////// Distortion Helpers /////////////// import skimage as sk from skimage.filters import gaussian from io import BytesIO from wand.image import Image as WandImage from wand.api import library as wandlibrary import wand.color as WandColor import ctypes from PIL import Image as PILImage import cv2 from scipy.ndimage import zoom as scizoom from scipy.ndimage.interpolation import map_coordinates import warnings warnings.simplefilter("ignore", UserWarning) 
def auc(errs): # area under the alteration error curve area = 0 for i in range(1, len(errs)): area += (errs[i] + errs[i - 1]) / 2 area /= len(errs) - 1 return area def disk(radius, alias_blur=0.1, dtype=np.float32): if radius <= 8: L = np.arange(-8, 8 + 1) ksize = (3, 3) else: L = np.arange(-radius, radius + 1) ksize = (5, 5) X, Y = np.meshgrid(L, L) aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype) aliased_disk /= np.sum(aliased_disk) # supersample disk to antialias return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur) # Tell Python about the C method wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand ctypes.c_double, # radius ctypes.c_double, # sigma ctypes.c_double) # angle # Extend wand.image.Image class to include method signature class MotionImage(WandImage): def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0): wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle) # modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py def plasma_fractal(mapsize=256, wibbledecay=3): """ Generate a heightmap using diamond-square algorithm. Return square 2d array, side length 'mapsize', of floats in range 0-255. 'mapsize' must be a power of two. """ assert (mapsize & (mapsize - 1) == 0) maparray = np.empty((mapsize, mapsize), dtype=np.float_) maparray[0, 0] = 0 stepsize = mapsize wibble = 100 def wibbledmean(array): return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape) def fillsquares(): """For each square of points stepsize apart, calculate middle value as mean of points + wibble""" cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0) squareaccum += np.roll(squareaccum, shift=-1, axis=1) maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum) def filldiamonds(): """For each diamond of points stepsize apart, calculate middle value as mean of points + wibble""" mapsize = maparray.shape[0] drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize] ldrsum = drgrid + np.roll(drgrid, 1, axis=0) lulsum = ulgrid + np.roll(ulgrid, -1, axis=1) ltsum = ldrsum + lulsum maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum) tdrsum = drgrid + np.roll(drgrid, 1, axis=1) tulsum = ulgrid + np.roll(ulgrid, -1, axis=0) ttsum = tdrsum + tulsum maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum) while stepsize >= 2: fillsquares() filldiamonds() stepsize //= 2 wibble /= wibbledecay maparray -= maparray.min() return maparray / maparray.max() def clipped_zoom(img, zoom_factor): h = img.shape[0] # ceil crop height(= crop width) ch = int(np.ceil(h / zoom_factor)) top = (h - ch) // 2 img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1) # trim off any extra pixels trim_top = (img.shape[0] - h) // 2 return img[trim_top:trim_top + h, trim_top:trim_top + h] # /////////////// End Distortion Helpers /////////////// # /////////////// Distortions /////////////// def gaussian_noise(x, severity=1): c = [.08, .12, 0.18, 0.26, 0.38][severity - 1] x = np.array(x) / 255. return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255 def shot_noise(x, severity=1): c = [60, 25, 12, 5, 3][severity - 1] x = np.array(x) / 255. 
return np.clip(np.random.poisson(x * c) / c, 0, 1) * 255 def impulse_noise(x, severity=1): c = [.03, .06, .09, 0.17, 0.27][severity - 1] x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c) return np.clip(x, 0, 1) * 255 def speckle_noise(x, severity=1): c = [.15, .2, 0.35, 0.45, 0.6][severity - 1] x = np.array(x) / 255. return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255 def fgsm(x, source_net, severity=1): c = [8, 16, 32, 64, 128][severity - 1] x = V(x, requires_grad=True) logits = source_net(x) source_net.zero_grad() loss = F.cross_entropy(logits, V(logits.data.max(1)[1].squeeze_()), size_average=False) loss.backward() return standardize(torch.clamp(unstandardize(x.data) + c / 255. * unstandardize(torch.sign(x.grad.data)), 0, 1)) def gaussian_blur(x, severity=1): c = [1, 2, 3, 4, 6][severity - 1] x = gaussian(np.array(x) / 255., sigma=c, multichannel=True) return np.clip(x, 0, 1) * 255 def glass_blur(x, severity=1): # sigma, max_delta, iterations c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1] x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255) # locally shuffle pixels for i in range(c[2]): for h in range(224 - c[1], c[1], -1): for w in range(224 - c[1], c[1], -1): dx, dy = np.random.randint(-c[1], c[1], size=(2,)) h_prime, w_prime = h + dy, w + dx # swap x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w] return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255 def defocus_blur(x, severity=1): c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1] x = np.array(x) / 255. kernel = disk(radius=c[0], alias_blur=c[1]) channels = [] for d in range(3): channels.append(cv2.filter2D(x[:, :, d], -1, kernel)) channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3 return np.clip(channels, 0, 1) * 255 def motion_blur(x, severity=1): c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1] output = BytesIO() x.save(output, format='PNG') x = MotionImage(blob=output.getvalue()) x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45)) x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED) if x.shape != (224, 224): return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB else: # greyscale to RGB return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) def zoom_blur(x, severity=1): c = [np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03)][severity - 1] x = (np.array(x) / 255.).astype(np.float32) out = np.zeros_like(x) for zoom_factor in c: out += clipped_zoom(x, zoom_factor) x = (x + out) / (len(c) + 1) return np.clip(x, 0, 1) * 255 # def barrel(x, severity=1): # c = [(0,0.03,0.03), (0.05,0.05,0.05), (0.1,0.1,0.1), # (0.2,0.2,0.2), (0.1,0.3,0.6)][severity - 1] # # output = BytesIO() # x.save(output, format='PNG') # # x = WandImage(blob=output.getvalue()) # x.distort('barrel', c) # # x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8), # cv2.IMREAD_UNCHANGED) # # if x.shape != (224, 224): # return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB # else: # greyscale to RGB # return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255) def fog(x, severity=1): c = [(1.5, 2), (2, 2), (2.5, 1.7), (2.5, 1.5), (3, 1.4)][severity - 1] x = np.array(x) / 255. 
max_val = x.max() x += c[0] * plasma_fractal(wibbledecay=c[1])[:224, :224][..., np.newaxis] return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255 def frost(x, severity=1): c = [(1, 0.4), (0.8, 0.6), (0.7, 0.7), (0.65, 0.7), (0.6, 0.75)][severity - 1] idx = np.random.randint(5) filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][idx] frost = cv2.imread(filename) # randomly crop and convert to rgb x_start, y_start = np.random.randint(0, frost.shape[0] - 224), np.random.randint(0, frost.shape[1] - 224) frost = frost[x_start:x_start + 224, y_start:y_start + 224][..., [2, 1, 0]] return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255) def snow(x, severity=1): c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8), (0.2, 0.3, 2, 0.5, 12, 4, 0.7), (0.55, 0.3, 4, 0.9, 12, 8, 0.7), (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65), (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1] x = np.array(x, dtype=np.float32) / 255. snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2]) snow_layer[snow_layer < c[3]] = 0 snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L') output = BytesIO() snow_layer.save(output, format='PNG') snow_layer = MotionImage(blob=output.getvalue()) snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45)) snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED) / 255. snow_layer = snow_layer[..., np.newaxis] x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(224, 224, 1) * 1.5 + 0.5) return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255 def spatter(x, severity=1): c = [(0.65, 0.3, 4, 0.69, 0.6, 0), (0.65, 0.3, 3, 0.68, 0.6, 0), (0.65, 0.3, 2, 0.68, 0.5, 0), (0.65, 0.3, 1, 0.65, 1.5, 1), (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1] x = np.array(x, dtype=np.float32) / 255. liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) liquid_layer = gaussian(liquid_layer, sigma=c[2]) liquid_layer[liquid_layer < c[3]] = 0 if c[5] == 0: liquid_layer = (liquid_layer * 255).astype(np.uint8) dist = 255 - cv2.Canny(liquid_layer, 50, 150) dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5) _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC) dist = cv2.blur(dist, (3, 3)).astype(np.uint8) dist = cv2.equalizeHist(dist) # ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32) # ker -= np.mean(ker) ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]) dist = cv2.filter2D(dist, cv2.CV_8U, ker) dist = cv2.blur(dist, (3, 3)).astype(np.float32) m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA) m /= np.max(m, axis=(0, 1)) m *= c[4] # water is pale turqouise color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]), 238 / 255. * np.ones_like(m[..., :1]), 238 / 255. * np.ones_like(m[..., :1])), axis=2) color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA) x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA) return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255 else: m = np.where(liquid_layer > c[3], 1, 0) m = gaussian(m.astype(np.float32), sigma=c[4]) m[m < 0.8] = 0 # m = np.abs(m) ** (1/c[4]) # mud brown color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]), 42 / 255. * np.ones_like(x[..., :1]), 20 / 255. 
* np.ones_like(x[..., :1])), axis=2) color *= m[..., np.newaxis] x *= (1 - m[..., np.newaxis]) return np.clip(x + color, 0, 1) * 255 def contrast(x, severity=1): c = [0.4, .3, .2, .1, .05][severity - 1] x = np.array(x) / 255. means = np.mean(x, axis=(0, 1), keepdims=True) return np.clip((x - means) * c + means, 0, 1) * 255 def brightness(x, severity=1): c = [.1, .2, .3, .4, .5][severity - 1] x = np.array(x) / 255. x = sk.color.rgb2hsv(x) x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1) x = sk.color.hsv2rgb(x) return np.clip(x, 0, 1) * 255 def saturate(x, severity=1): c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1] x = np.array(x) / 255. x = sk.color.rgb2hsv(x) x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1) x = sk.color.hsv2rgb(x) return np.clip(x, 0, 1) * 255 def jpeg_compression(x, severity=1): c = [25, 18, 15, 10, 7][severity - 1] output = BytesIO() x.save(output, 'JPEG', quality=c) x = PILImage.open(output) return x def pixelate(x, severity=1): c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1] x = x.resize((int(224 * c), int(224 * c)), PILImage.BOX) x = x.resize((224, 224), PILImage.BOX) return x # mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5 def elastic_transform(image, severity=1): c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 224, but ultimately nothing is incorrect (244 * 2, 244 * 0.08, 244 * 0.2), (244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01, 244 * 0.02), (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1] image = np.array(image, dtype=np.float32) / 255. shape = image.shape shape_size = shape[:2] # random affine center_square = np.float32(shape_size) // 2 square_size = min(shape_size) // 3 pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size], center_square - square_size]) pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32) M = cv2.getAffineTransform(pts1, pts2) image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101) dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]), c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32) dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]), c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32) dx, dy = dx[..., np.newaxis], dy[..., np.newaxis] x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2])) indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1)) return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255 # /////////////// End Distortions /////////////// # /////////////// Further Setup /////////////// def save_distorted(method=gaussian_noise): for severity in range(1, 6): print(method.__name__, severity) distorted_dataset = DistortImageFolder( root="/share/data/vision-greg/ImageNet/clsloc/images/val", method=method, severity=severity, transform=trn.Compose([trn.Resize(256), trn.CenterCrop(224)])) distorted_dataset_loader = torch.utils.data.DataLoader( distorted_dataset, batch_size=100, shuffle=False, num_workers=4) for _ in distorted_dataset_loader: continue # /////////////// End Further Setup /////////////// # /////////////// Display Results /////////////// import collections print('\nUsing ImageNet data') d = collections.OrderedDict() d['Gaussian Noise'] = gaussian_noise d['Shot Noise'] = shot_noise d['Impulse Noise'] = impulse_noise d['Defocus Blur'] = defocus_blur d['Glass Blur'] = glass_blur d['Motion Blur'] = motion_blur d['Zoom 
Blur'] = zoom_blur d['Snow'] = snow d['Frost'] = frost d['Fog'] = fog d['Brightness'] = brightness d['Contrast'] = contrast d['Elastic'] = elastic_transform d['Pixelate'] = pixelate d['JPEG'] = jpeg_compression d['Speckle Noise'] = speckle_noise d['Gaussian Blur'] = gaussian_blur d['Spatter'] = spatter d['Saturate'] = saturate for method_name in d.keys(): save_distorted(d[method_name])
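# Editor's note: a minimal, hedged usage sketch (not part of the original script) showing how a
# single corruption defined above can be applied to one image. The path 'example.jpg' is a
# hypothetical placeholder; the uint8 cast is needed because the corruption functions return
# float arrays scaled to [0, 255].
img = PILImage.open('example.jpg').convert('RGB').resize((224, 224))
corrupted = gaussian_noise(img, severity=3)
PILImage.fromarray(np.uint8(corrupted)).save('example_gaussian_noise_3.jpg')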
apache-2.0
johncadigan/scrapy-sci
wallpaper_demo/wallpaper/classifier_pipelines.py
2
1983
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os

from scrapy.contrib.exporter import JsonItemExporter
from scrapy.exceptions import DropItem

from sklearn.linear_model import LogisticRegression

from scrapy_sci.status import Status, Reader
from scrapy_sci.classifier import ClassifierFactory


class ClassifiersPipeline(object):

    def __init__(self):
        self.status = Status()
        self.classifiers = []
        self.exporters = {}
        for classifier in self.status.classifiers.keys():
            CF = ClassifierFactory(self.status.classifiers[classifier])
            CF.create_data_set("both")
            lc = CF.create_classifier(LogisticRegression(C=1e5),
                                      self.status.classifiers[classifier]['features']())
            lc.fit()
            self.classifiers.append((classifier, lc))
        self.classifiers = sorted(self.classifiers,
                                  key=lambda a: a[1].estimate_accuracy(5, verbose=True))
        print "Classifier {0} needs the most improvement; selected for export".format(self.classifiers[0][0])
        for classification in self.status.classifiers[self.classifiers[0][0]]['classifications']:
            f = file("{0}.json".format(classification), "wb")
            self.exporters[classification] = JsonItemExporter(f)

    def process_item(self, item, spider):
        keep = True
        for i, (name, classifier) in enumerate(self.classifiers):
            item_classification = classifier.classify(item)
            if i == 0:
                export_classification = item_classification
            if self.status.classifiers[name]['classifications'][item_classification] == False:
                raise DropItem("Item removed by classifier: {0}".format(name))
        if keep == True:
            self.exporters[export_classification].export_item(item)
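# Editor's note: a hedged sketch (not part of this file) of the settings.py entry the header
# comment refers to. The dotted path 'wallpaper.classifier_pipelines' is inferred from the file
# location and the priority 300 is an arbitrary example; older Scrapy releases expect a plain
# list of class paths instead of a dict.
ITEM_PIPELINES = {
    'wallpaper.classifier_pipelines.ClassifiersPipeline': 300,
}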
bsd-3-clause
imaculate/scikit-learn
examples/datasets/plot_iris_dataset.py
34
1929
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray

The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.

The plot below uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
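# Editor's note: a small, optional check (not part of the original example). Printing the
# explained variance ratio of a 3-component PCA gives a rough idea of how much of the data's
# structure the 3D projection above preserves.
pca = PCA(n_components=3).fit(iris.data)
print("Explained variance ratio: %s" % str(pca.explained_variance_ratio_))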
bsd-3-clause
imaculate/scikit-learn
sklearn/neighbors/nearest_centroid.py
33
7347
# -*- coding: utf-8 -*- """ Nearest Centroid Classification """ # Author: Robert Layton <robertlayton@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # # License: BSD 3 clause import warnings import numpy as np from scipy import sparse as sp from ..base import BaseEstimator, ClassifierMixin from ..metrics.pairwise import pairwise_distances from ..preprocessing import LabelEncoder from ..utils.validation import check_array, check_X_y, check_is_fitted from ..utils.sparsefuncs import csc_median_axis_0 from ..utils.multiclass import check_classification_targets class NearestCentroid(BaseEstimator, ClassifierMixin): """Nearest centroid classifier. Each class is represented by its centroid, with test samples classified to the class with the nearest centroid. Read more in the :ref:`User Guide <nearest_centroid_classifier>`. Parameters ---------- metric: string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroids for the samples corresponding to each class is the point from which the sum of the distances (according to the metric) of all samples that belong to that particular class are minimized. If the "manhattan" metric is provided, this centroid is the median and for all other metrics, the centroid is now set to be the mean. shrink_threshold : float, optional (default = None) Threshold for shrinking centroids to remove features. Attributes ---------- centroids_ : array-like, shape = [n_classes, n_features] Centroid of each class Examples -------- >>> from sklearn.neighbors.nearest_centroid import NearestCentroid >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = NearestCentroid() >>> clf.fit(X, y) NearestCentroid(metric='euclidean', shrink_threshold=None) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier Notes ----- When used for text classification with tf-idf vectors, this classifier is also known as the Rocchio classifier. References ---------- Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of multiple cancer types by shrunken centroids of gene expression. Proceedings of the National Academy of Sciences of the United States of America, 99(10), 6567-6572. The National Academy of Sciences. """ def __init__(self, metric='euclidean', shrink_threshold=None): self.metric = metric self.shrink_threshold = shrink_threshold def fit(self, X, y): """ Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. Note that centroid shrinking cannot be used with sparse matrices. y : array, shape = [n_samples] Target values (integers) """ # If X is sparse and the metric is "manhattan", store it in a csc # format is easier to calculate the median. 
if self.metric == 'manhattan': X, y = check_X_y(X, y, ['csc']) else: X, y = check_X_y(X, y, ['csr', 'csc']) is_X_sparse = sp.issparse(X) if is_X_sparse and self.shrink_threshold: raise ValueError("threshold shrinking not supported" " for sparse input") check_classification_targets(y) n_samples, n_features = X.shape le = LabelEncoder() y_ind = le.fit_transform(y) self.classes_ = classes = le.classes_ n_classes = classes.size if n_classes < 2: raise ValueError('y has less than 2 classes') # Mask mapping each class to its members. self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) # Number of clusters in each class. nk = np.zeros(n_classes) for cur_class in range(n_classes): center_mask = y_ind == cur_class nk[cur_class] = np.sum(center_mask) if is_X_sparse: center_mask = np.where(center_mask)[0] # XXX: Update other averaging methods according to the metrics. if self.metric == "manhattan": # NumPy does not calculate median of sparse matrices. if not is_X_sparse: self.centroids_[cur_class] = np.median(X[center_mask], axis=0) else: self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) else: if self.metric != 'euclidean': warnings.warn("Averaging for metrics other than " "euclidean and manhattan not supported. " "The average is set to be the mean." ) self.centroids_[cur_class] = X[center_mask].mean(axis=0) if self.shrink_threshold: dataset_centroid_ = np.mean(X, axis=0) # m parameter for determining deviation m = np.sqrt((1. / nk) + (1. / n_samples)) # Calculate deviation using the standard deviation of centroids. variance = (X - self.centroids_[y_ind]) ** 2 variance = variance.sum(axis=0) s = np.sqrt(variance / (n_samples - n_classes)) s += np.median(s) # To deter outliers from affecting the results. mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. ms = mm * s deviation = ((self.centroids_ - dataset_centroid_) / ms) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. signs = np.sign(deviation) deviation = (np.abs(deviation) - self.shrink_threshold) deviation[deviation < 0] = 0 deviation *= signs # Now adjust the centroids using the deviation msd = ms * deviation self.centroids_ = dataset_centroid_[np.newaxis, :] + msd return self def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Notes ----- If the metric constructor parameter is "precomputed", X is assumed to be the distance matrix between the data to be predicted and ``self.centroids_``. """ check_is_fitted(self, 'centroids_') X = check_array(X, accept_sparse='csr') return self.classes_[pairwise_distances( X, self.centroids_, metric=self.metric).argmin(axis=1)]
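# Editor's note: a minimal, hedged usage sketch (not part of the module). Because the module
# relies on relative imports it is not meant to be run directly, so the snippet below exercises
# the same estimator through the public sklearn API.
import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
clf = NearestCentroid(metric='manhattan').fit(X, y)
print(clf.predict([[-0.8, -1]]))  # expected: [1]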
bsd-3-clause
cybernet14/scikit-learn
sklearn/decomposition/truncated_svd.py
198
7744
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck <L.J.Buitinck@uva.nl> # Olivier Grisel <olivier.grisel@ensta.org> # Michael Becker <mike@beckerfuffle.com> # License: 3-clause BSD. import numpy as np import scipy.sparse as sp try: from scipy.sparse.linalg import svds except ImportError: from ..utils.arpack import svds from ..base import BaseEstimator, TransformerMixin from ..utils import check_array, as_float_array, check_random_state from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). It is very similar to PCA, but operates on sample vectors directly, instead of on a covariance matrix. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithm: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional Number of iterations for randomized SVD solver. Not used by ARPACK. random_state : int or RandomState, optional (Seed for) pseudo-random number generator. If not given, the numpy.random singleton is used. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : array, shape (n_components, n_features) explained_variance_ratio_ : array, [n_components] Percentage of variance explained by each of the selected components. explained_variance_ : array, [n_components] The variance of the training samples transformed by a projection to each component. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...] >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS 0.27930... See also -------- PCA RandomizedPCA References ---------- Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 Notes ----- SVD suffers from a problem called "sign indeterminancy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. 
To work around this, fit instances of this class to data once, then keep the instance around to do transformations. """ def __init__(self, n_components=2, algorithm="randomized", n_iter=5, random_state=None, tol=0.): self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit LSI model on training data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self def fit_transform(self, X, y=None): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = as_float_array(X, copy=False) random_state = check_random_state(self.random_state) # If sparse and not csr or csc, convert to csr if sp.issparse(X) and X.getformat() not in ["csr", "csc"]: X = X.tocsr() if self.algorithm == "arpack": U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] U, VT = svd_flip(U[:, ::-1], VT[::-1]) elif self.algorithm == "randomized": k = self.n_components n_features = X.shape[1] if k >= n_features: raise ValueError("n_components must be < n_features;" " got %d >= %d" % (k, n_features)) U, Sigma, VT = randomized_svd(X, self.n_components, n_iter=self.n_iter, random_state=random_state) else: raise ValueError("unknown algorithm %r" % self.algorithm) self.components_ = VT # Calculate explained variance & explained variance ratio X_transformed = np.dot(U, np.diag(Sigma)) self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = check_array(X, accept_sparse='csr') return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data. Returns ------- X_original : array, shape (n_samples, n_features) Note that this is always a dense array. """ X = check_array(X) return np.dot(X, self.components_)
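# Editor's note: a hedged usage sketch (not part of the module) of the LSA workflow described in
# the class docstring: TF-IDF features reduced with TruncatedSVD and re-normalized. The tiny
# document list is a hypothetical placeholder.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer

documents = ["the cat sat on the mat", "the dog sat on the log", "cats and dogs say hello"]
lsa = make_pipeline(TfidfVectorizer(), TruncatedSVD(n_components=2), Normalizer(copy=False))
X_lsa = lsa.fit_transform(documents)
print(X_lsa.shape)  # (3, 2)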
bsd-3-clause
voxlol/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
302
4121
""" Testing for the nearest centroid module. """ import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from numpy.testing import assert_equal from sklearn.neighbors import NearestCentroid from sklearn import datasets from sklearn.metrics.pairwise import pairwise_distances # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] X_csr = sp.csr_matrix(X) # Sparse matrix y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] T_csr = sp.csr_matrix(T) true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_classification_toy(): # Check classification on a toy dataset, including sparse versions. clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) # Same test, but with a sparse matrix to fit and test. clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit with sparse, test with non-sparse clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T), true_result) # Fit with non-sparse, test with sparse clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit and predict with non-CSR sparse matrices clf = NearestCentroid() clf.fit(X_csr.tocoo(), y) assert_array_equal(clf.predict(T_csr.tolil()), true_result) def test_precomputed(): clf = NearestCentroid(metric="precomputed") clf.fit(X, y) S = pairwise_distances(T, clf.centroids_) assert_array_equal(clf.predict(S), true_result) def test_iris(): # Check consistency on dataset iris. for metric in ('euclidean', 'cosine'): clf = NearestCentroid(metric=metric).fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.9, "Failed with score = " + str(score) def test_iris_shrinkage(): # Check consistency on dataset iris, when using shrinkage. for metric in ('euclidean', 'cosine'): for shrink_threshold in [None, 0.1, 0.5]: clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold) clf = clf.fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.8, "Failed with score = " + str(score) def test_pickle(): import pickle # classification obj = NearestCentroid() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_array_equal(score, score2, "Failed to generate same score" " after pickling (classification).") def test_shrinkage_threshold_decoded_y(): clf = NearestCentroid(shrink_threshold=0.01) y_ind = np.asarray(y) y_ind[y_ind == -1] = 0 clf.fit(X, y_ind) centroid_encoded = clf.centroids_ clf.fit(X, y) assert_array_equal(centroid_encoded, clf.centroids_) def test_predict_translated_data(): # Test that NearestCentroid gives same results on translated data rng = np.random.RandomState(0) X = rng.rand(50, 50) y = rng.randint(0, 3, 50) noise = rng.rand(50) clf = NearestCentroid(shrink_threshold=0.1) clf.fit(X, y) y_init = clf.predict(X) clf = NearestCentroid(shrink_threshold=0.1) X_noise = X + noise clf.fit(X_noise, y) y_translate = clf.predict(X_noise) assert_array_equal(y_init, y_translate) def test_manhattan_metric(): # Test the manhattan metric. 
clf = NearestCentroid(metric='manhattan') clf.fit(X, y) dense_centroid = clf.centroids_ clf.fit(X_csr, y) assert_array_equal(clf.centroids_, dense_centroid) assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
bsd-3-clause
Titan-C/scikit-learn
examples/svm/plot_weighted_samples.py
36
1963
""" ===================== SVM: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. The sample weighting rescales the C parameter, which means that the classifier puts more emphasis on getting these points right. The effect might often be subtle. To emphasize the effect here, we particularly weight outliers, making the deformation of the decision boundary very visible. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm def plot_decision_function(classifier, sample_weight, axis, title): # plot the decision function xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # plot the line, the points, and the nearest vectors to the plane axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone) axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9, cmap=plt.cm.bone, edgecolors='black') axis.axis('off') axis.set_title(title) # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight_last_ten = abs(np.random.randn(len(X))) sample_weight_constant = np.ones(len(X)) # and bigger weights to some outliers sample_weight_last_ten[15:] *= 5 sample_weight_last_ten[9] *= 15 # for reference, first fit without class weights # fit the model clf_weights = svm.SVC() clf_weights.fit(X, y, sample_weight=sample_weight_last_ten) clf_no_weights = svm.SVC() clf_no_weights.fit(X, y) fig, axes = plt.subplots(1, 2, figsize=(14, 6)) plot_decision_function(clf_no_weights, sample_weight_constant, axes[0], "Constant weights") plot_decision_function(clf_weights, sample_weight_last_ten, axes[1], "Modified weights") plt.show()
bsd-3-clause
Titan-C/scikit-learn
examples/model_selection/plot_roc_crossval.py
16
3697
""" ============================================================= Receiver Operating Characteristic (ROC) with cross validation ============================================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality using cross-validation. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. This example shows the ROC response of different datasets, created from K-fold cross-validation. Taking all of these curves, it is possible to calculate the mean area under curve, and see the variance of the curve when the training set is split into different subsets. This roughly shows how the classifier output is affected by changes in the training data, and how different the splits generated by K-fold cross-validation are from one another. .. note:: See also :func:`sklearn.metrics.auc_score`, :func:`sklearn.model_selection.cross_val_score`, :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`, """ print(__doc__) import numpy as np from scipy import interp import matplotlib.pyplot as plt from itertools import cycle from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.model_selection import StratifiedKFold # ############################################################################# # Data IO and generation # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target X, y = X[y != 2], y[y != 2] n_samples, n_features = X.shape # Add noisy features random_state = np.random.RandomState(0) X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # ############################################################################# # Classification and ROC analysis # Run classifier with cross-validation and plot ROC curves cv = StratifiedKFold(n_splits=6) classifier = svm.SVC(kernel='linear', probability=True, random_state=random_state) tprs = [] aucs = [] mean_fpr = np.linspace(0, 1, 100) i = 0 for train, test in cv.split(X, y): probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test]) # Compute ROC curve and area the curve fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1]) tprs.append(interp(mean_fpr, fpr, tpr)) tprs[-1][0] = 0.0 roc_auc = auc(fpr, tpr) aucs.append(roc_auc) plt.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc)) i += 1 plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=.8) mean_tpr = np.mean(tprs, axis=0) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(aucs) plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8) std_tpr = np.std(tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.') plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show()
bsd-3-clause
massmutual/scikit-learn
sklearn/tree/tests/test_export.py
130
9950
""" Testing for export functions of decision trees (sklearn.tree.export). """ from re import finditer from numpy.testing import assert_equal from nose.tools import assert_raises from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import export_graphviz from sklearn.externals.six import StringIO from sklearn.utils.testing import assert_in # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]] w = [1, 1, 1, .5, .5, .5] def test_graphviz_toy(): # Check correctness of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1, criterion="gini", random_state=2) clf.fit(X, y) # Test export code out = StringIO() export_graphviz(clf, out_file=out) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with feature_names out = StringIO() export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with class_names out = StringIO() export_graphviz(clf, out_file=out, class_names=["yes", "no"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = yes"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \ 'class = yes"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \ 'class = no"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test plot_options out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False, proportion=True, special_characters=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'edge [fontname=helvetica] ;\n' \ '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \ 'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \ '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \ 'fillcolor="#399de5ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, class_names=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node 
[shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = y[0]"] ;\n' \ '1 [label="(...)"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth with plot_options out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, filled=True, node_ids=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \ 'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \ '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test multi-output with weighted samples clf = DecisionTreeClassifier(max_depth=2, min_samples_split=1, criterion="gini", random_state=2) clf = clf.fit(X, y2, sample_weight=w) out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \ 'value = [[3.0, 1.5, 0.0]\\n' \ '[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \ '1 [label="X[1] <= -1.5\\nsamples = 3\\n' \ 'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \ '[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \ '1 -> 2 ;\n' \ '3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \ '[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \ '1 -> 3 ;\n' \ '4 [label="X[0] <= 1.5\\nsamples = 3\\n' \ 'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 4 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \ '[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \ '4 -> 5 ;\n' \ '6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \ '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \ '4 -> 6 ;\n' \ '}' assert_equal(contents1, contents2) # Test regression output with plot_options clf = DecisionTreeRegressor(max_depth=3, min_samples_split=1, criterion="mse", random_state=2) clf.fit(X, y) out = StringIO() export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True, rotate=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'graph [ranksep=equally, splines=polyline] ;\n' \ 'edge [fontname=helvetica] ;\n' \ 'rankdir=LR ;\n' \ '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \ 'value = 0.0", fillcolor="#e581397f"] ;\n' \ '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \ 'fillcolor="#e5813900"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="True"] ;\n' \ '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="False"] ;\n' \ '{rank=same ; 0} ;\n' \ '{rank=same ; 1; 2} ;\n' \ '}' assert_equal(contents1, contents2) def test_graphviz_errors(): # Check for errors of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1) clf.fit(X, y) # Check feature_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, feature_names=[]) # Check class_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, class_names=[]) def 
test_friedman_mse_in_graphviz(): clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0) clf.fit(X, y) dot_data = StringIO() export_graphviz(clf, out_file=dot_data) clf = GradientBoostingClassifier(n_estimators=2, random_state=0) clf.fit(X, y) for estimator in clf.estimators_: export_graphviz(estimator[0], out_file=dot_data) for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()): assert_in("friedman_mse", finding.group())
bsd-3-clause
massmutual/scikit-learn
examples/cluster/plot_kmeans_assumptions.py
267
2040
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means will produce unintuitive and possibly unexpected clusters. In the first three plots, the input data does not conform to some implicit assumption that k-means makes and undesirable clusters are produced as a result. In the last plot, k-means returns intuitive clusters despite unevenly sized blobs. """ print(__doc__) # Author: Phil Roth <mr.phil.roth@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") plt.show()
bsd-3-clause
walterreade/scikit-learn
examples/calibration/plot_compare_calibration.py
81
5012
""" ======================================== Comparison of Calibration of Classifiers ======================================== Well calibrated classifiers are probabilistic classifiers for which the output of the predict_proba method can be directly interpreted as a confidence level. For instance a well calibrated (binary) classifier should classify the samples such that among the samples to which it gave a predict_proba value close to 0.8, approx. 80% actually belong to the positive class. LogisticRegression returns well calibrated predictions as it directly optimizes log-loss. In contrast, the other methods return biased probabilities, with different biases per method: * GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in the histograms). This is mainly because it makes the assumption that features are conditionally independent given the class, which is not the case in this dataset which contains 2 redundant features. * RandomForestClassifier shows the opposite behavior: the histograms show peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1 are very rare. An explanation for this is given by Niculescu-Mizil and Caruana [1]: "Methods such as bagging and random forests that average predictions from a base set of models can have difficulty making predictions near 0 and 1 because variance in the underlying base models will bias predictions that should be near zero or one away from these values. Because predictions are restricted to the interval [0,1], errors caused by variance tend to be one- sided near zero and one. For example, if a model should predict p = 0 for a case, the only way bagging can achieve this is if all bagged trees predict zero. If we add noise to the trees that bagging is averaging over, this noise will cause some trees to predict values larger than 0 for this case, thus moving the average prediction of the bagged ensemble away from 0. We observe this effect most strongly with random forests because the base-level trees trained with random forests have relatively high variance due to feature subseting." As a result, the calibration curve shows a characteristic sigmoid shape, indicating that the classifier could trust its "intuition" more and return probabilities closer to 0 or 1 typically. * Support Vector Classification (SVC) shows an even more sigmoid curve as the RandomForestClassifier, which is typical for maximum-margin methods (compare Niculescu-Mizil and Caruana [1]), which focus on hard samples that are close to the decision boundary (the support vectors). .. topic:: References: .. [1] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ print(__doc__) # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD Style. 
import numpy as np np.random.seed(0) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.calibration import calibration_curve X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=2) train_samples = 100 # Samples used for training the models X_train = X[:train_samples] X_test = X[train_samples:] y_train = y[:train_samples] y_test = y[train_samples:] # Create classifiers lr = LogisticRegression() gnb = GaussianNB() svc = LinearSVC(C=1.0) rfc = RandomForestClassifier(n_estimators=100) ############################################################################### # Plot calibration plots plt.figure(figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (gnb, 'Naive Bayes'), (svc, 'Support Vector Classification'), (rfc, 'Random Forest')]: clf.fit(X_train, y_train) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s" % (name, )) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() plt.show()
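# Editor's note: a hedged follow-up (not part of the example). The sigmoid-shaped reliability
# curves discussed above can be corrected post hoc, for instance by wrapping a classifier in
# CalibratedClassifierCV; method='sigmoid' and cv=3 here are illustrative choices.
from sklearn.calibration import CalibratedClassifierCV

calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
calibrated_svc.fit(X_train, y_train)
prob_pos_calibrated = calibrated_svc.predict_proba(X_test)[:, 1]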
bsd-3-clause
RDCEP/ggcmi
bin/qcontrol/ggcmi.QC.phase2.py
1
21533
#!/usr/bin/env python # import modules from os import listdir from os.path import sep, exists from optparse import OptionParser from netCDF4 import Dataset as nc from numpy.ma import isMaskedArray, masked_where from numpy import inf, float32, zeros, ones, where, diff, isnan, array_equal, logical_and def climate_years(clim_names): clim_names_list = ['PGFv2', 'AgCFSR', 'AgMERRA', 'CFSR', 'ERAI', 'GRASP', \ 'Princeton', 'WATCH', 'WFDEI.CRU', 'WFDEI.GPCC', 'GSWP3', 'phase2'] year_range_list = [[1901, 2012], [1980, 2010], [1980, 2010], [1980, 2010], [1979, 2010], [1961, 2010], \ [1948, 2008], [1958, 2001], [1979, 2009], [1979, 2009], [1901, 2010], [1980, 2010]] yrs = [0] * len(clim_names) for i in range(len(clim_names)): if clim_names[i] in clim_names_list: yrs[i] = year_range_list[clim_names_list.index(clim_names[i])] else: raise Exception('Unknown climate') return yrs def abr_crop_names(crop_names): full_names_list = ['maize', 'winter_wheat', 'spring_wheat', 'soy', 'rice', 'sorghum', 'millet', \ 'managed_grass', 'sugarcane', 'barley', 'oat', 'rapeseed', \ 'rye', 'sugar_beet', 'sunflower'] abr_names_list = ['mai', 'wwh', 'swh', 'soy', 'ric', 'sor', 'mil', \ 'mgr', 'sug', 'bar', 'oat', 'rap', \ 'rye', 'sgb', 'sun'] abr_names = [0] * len(crop_names) for i in range(len(crop_names)): if crop_names[i] in full_names_list: abr_names[i] = abr_names_list[full_names_list.index(crop_names[i])] else: raise Exception('Unknown crop') return abr_names # parse inputs parser = OptionParser() parser.add_option("-d", "--dir", dest = "dir", default = "AgMIP.output", type = "string", help = "Directory in which to perform verification") parser.add_option("-m", "--mod", dest = "mod", default = "pDSSAT,pAPSIM", type = "string", help = "Comma-separated list of crop models to verify (* = all models)") parser.add_option("-w", "--weath", dest = "weath", default = "PGFv2,AgCFSR,AgMERRA,CFSR,ERAI,GRASP,Princeton,WATCH,WFDEI.CRU,WFDEI.GPCC", type = "string", help = "Comma-separated list of weather datasets to verify (* = all weather datasets)") parser.add_option("-c", "--crop", dest = "crop", default = "maize", type = "string", help = "Comma-separated list of crops to verify (* = all crops, except 'others')") parser.add_option("-s", "--sumdir", dest = "sumdir", default = "", type = "string", help = "Where to save summary report file(s)") options, args = parser.parse_args() dir = options.dir if dir[-1] == sep: dir = dir[: -1] # remove final separator # crop models if options.mod == '*': dirs = listdir(dir) # process all models if 'upload_stats' in dirs: dirs.remove('upload_stats') if 'aggregations' in dirs: dirs.remove('aggregations') else: dirs = options.mod.split(',') # climate data if options.weath == '*': climmodels = listdir(dir + sep + dirs[0]) # process all climate data, based on first directory else: climmodels = options.weath.split(',') climyears = climate_years(climmodels) numclims = len(climmodels) # crops if options.crop == '*': crops = listdir(dir + sep + dirs[0] + sep + climmodels[0]) if 'others' in crops: crops.remove('others') else: crops = options.crop.split(',') cropabbr = abr_crop_names(crops) numcrops = len(crops) # scenarios #scens = ['fullharm_noirr', 'fullharm_firr', 'default_noirr', 'default_firr', 'harmnon_noirr', 'harmnon_firr'] #dssat_scens = scens + ['fullharm_noirr_pt', 'fullharm_firr_pt'] # scenarios for pDSSAT and EPIC models #epic_scens = scens + \ # ['fullharm_noirr_pm', 'fullharm_firr_pm'] + \ # ['fullharm_noirr_pt', 'fullharm_firr_pt'] + \ # ['fullharm_noirr_hg', 'fullharm_firr_hg'] + \ # 
['fullharm_noirr_br', 'fullharm_firr_br'] scens = ['fullharm'] # variables vlist = ['yield', 'pirrww', \ 'plant-day', 'maty-day', 'aet', 'initr', 'gsprcp', \ 'anth-day', 'biom', 'leach', 'gsrsds', 'sumt', 'sco2', 'sn2o'] vranges = [[0, 50], [0, 10000], \ [1, 366], [1, 366], [0, 10000], [0, 1000], [0, 10000], \ [1, 366], [0, 100], [0, 1000], [0, 100000], [-inf, inf], [-inf, inf], [-inf, inf]] vunits = ['t ha-1 yr-1', 'mm yr-1', \ 'day of year', 'days from planting', 'mm yr-1', 'kg ha-1 yr-1', 'mm yr-1', \ 'days from planting', 't ha-1 yr-1', 'kg ha-1 yr-1', 'w m-2 yr-1', 'deg C-days yr-1', 'kg C ha-1', 'kg N2O-N ha-1'] numtotvars = len(vlist) mandatoryvars = zeros((numtotvars,), dtype = bool) mandatoryvars[: 2] = [True, True] # indices for parsing filenames fileindices = {'scen': [3, 4], 'var': 5, 'crop': 6, \ 'annual': 7, 'yr0': 8, 'yr1': 9} fileindices2 = {'scen': [3, 4, 5], 'var': 6, 'crop': 7, \ 'annual': 8, 'yr0': 9, 'yr1': 10} fileindices3 = {'scen': 2, 'var': 3, 'crop': 4, \ 'annual': 6, 'yr0': 7, 'yr1': 8} # iterate through all permutations for d in dirs: msg_sims = '' # missing simulations msg_data = '' # missing data within a simulation changes = '' # changes to make errors = '' # errors in data info = '' # basic file info fatal = '' # fatal errors #if 'pDSSAT' in d: # scenarios = dssat_scens #elif 'EPIC' in d: # scenarios = epiic_scens #else: scenarios = scens # for marking missing data msg_datmat = ones((numclims, numcrops, numtotvars, len(scenarios)), dtype = bool) for cm in range(len(climmodels)): for cp in range(len(crops)): sim = '{:15s}{:15s}{:15s}'.format(d, climmodels[cm], crops[cp]) if crops[cp] == 'winterwheat': subdir = dir + sep + d + sep + climmodels[cm] + sep + 'wheat' else : subdir = dir + sep + d + sep + climmodels[cm] + sep + crops[cp] print 'Processing directory', subdir, '. . .' 
# check if simulation exists if not exists(subdir) or not len(listdir(subdir)): msg_sims += sim + '\n' continue # iterate over all files files = listdir(subdir) #print 'files', files, '\n' for f in files: fileparts = f.split('_') #print 'fileparts', fileparts, '\n' if len(fileparts) == 10: fi = fileindices elif len(fileparts) == 11: fi = fileindices2 elif len(fileparts) == 14: fi = fileindices3 else: continue # invalid file format scenidx = fi['scen']; varidx = fi['var'] cropidx = fi['crop']; annidx = fi['annual'] yr0idx = fi['yr0']; yr1idx = fi['yr1'] #print fi, '\n', 'scen', scenidx, 'crop', cropidx, 'yr0', yr0idx,'var', varidx, 'ann',annidx, 'yr1',yr1idx, '\n' # for basic file info info_datmat = zeros((7,), dtype = '|S128') info_datmat[:] = 'N/A' info_datmat[0] = f # variable varname = fileparts[varidx].lower() #print 'varname', varname,fileparts[varidx],fileparts[4], vlist, '\n' if varname in vlist: varfullidx = vlist.index(varname) else: continue # cannot find variable # scenario scen = fileparts[scenidx].lower() #scen = '_'.join([fileparts[i].lower() for i in scenidx]) # combine #print 'scen', scen, scenarios, '\n' if scen in scenarios: scenfullidx = scenarios.index(scen) else: continue # cannot find scenario scen = scen.split('_') # resplit # mark as found msg_datmat[cm, cp, varfullidx, scenfullidx] = False sim2 = sim + '{:20s}{:15s}'.format(scenarios[scenfullidx], vlist[varfullidx]) # change filename as necessary if fileparts[0] != d.lower(): fileparts[0] = d.lower() # change model name if fileparts[1] != climmodels[cm].lower(): fileparts[1] = climmodels[cm].lower() # change climate name #if fileparts[2] != 'hist': # fileparts[2] = 'hist_fullharm_noirr' # change to hist #for i in range(len(scen)): #print 'scenidx', scenidx,fileparts[scenidx],'\n' if fileparts[scenidx] != 'fullharm': fileparts[scenidx] = 'fullharm' # change scenario if fileparts[varidx] != varname: fileparts[varidx] = varname # change var to lowercase if fileparts[cropidx] != cropabbr[cp]: if fileparts[cropidx] != 'ri2' or cropabbr[cp] != 'ric': # special case of second season for rice called ri2 fileparts[cropidx] = cropabbr[cp] # change crop name if fileparts[annidx] != 'annual': fileparts[annidx] = 'annual' # change to annual yr0f = int(fileparts[yr0idx]); yr1f = int(fileparts[yr1idx].split('.')[0]) info_datmat[5] = '[' + str(yr0f) + ', ' + str(yr1f) + ']' # save year range ncf = nc(subdir + sep + f) # load file fvars = ncf.variables.keys() lat = None; lon = None; time = None if not 'lat' in fvars: # check if latitude exists if 'latitude' in fvars: changes += sim2 + 'DIMENSION: Change latitude to lat\n' lat = ncf.variables['latitude'][:] else: errors += sim2 + 'DIMENSION: No latitude dimension in file\n' else: lat = ncf.variables['lat'][:] if not 'lon' in fvars: # check if longitude exists if 'longitude' in fvars: changes += sim2 + 'DIMENSION: Change longitude to lon\n' lon = ncf.variables['longitude'][:] else: errors += sim2 + 'DIMENSION: No longitude dimension in file\n' else: lon = ncf.variables['lon'][:] if not 'time' in fvars: # check if time exists if 'year' in fvars: errors += sim2 + 'DIMENSION: "year" set as time dimension in file instead of "time"\n' time = ncf.variables['year'][:] info_datmat[6] = len(time) else: errors += sim2 + 'DIMENSION: No time dimension in file\n' else: time = ncf.variables['time'][:] info_datmat[6] = len(time) if not lat is None: if any(diff(lat) > 0): changes += sim2 + 'VALUES: Change latitudes to descending order\n' if abs(lat[0] - 90) > 1. 
or abs(lat[-1] + 90) > 1.: changes += sim2 + 'RANGE: Change latitude range to [90, -90]\n' if not lon is None: if any(diff(lon) < 0): changes += sim2 + 'VALUES: Change longitudes to ascending order\n' if abs(lon[0] + 180) > 1. or abs(lon[-1] - 180) > 1.: changes += sim2 + 'RANGE: Change longitude range to [-180, 180]\n' if not time is None: if not array_equal(time, range(1, len(time) + 1)): changes += sim2 + 'VALUES: Change time values to increase uniformly from one\n' if 'year' in fvars: timev = ncf.variables['year'] else: timev = ncf.variables['time'] if not 'units' in timev.ncattrs(): changes += sim2 + 'UNITS: Add units to time\n' else: try: tsplit = timev.units.split('growing seasons since ')[1].split(' ') yr0, mth0, day0 = tsplit[0].split('-')[0 : 3] hr0, min0, sec0 = tsplit[1].split(':')[0 : 3] if mth0 != '01' or day0 != '01' or hr0 != '00' or min0 != '00' or sec0 != '00': changes += sim2 + 'UNITS: Change time units date to "' + yr0 + '-01-01 00:00:00"\n' yr0 = int(yr0); yr1 = yr0 + len(time) - 1 # check if filename is consistent if yr0f != yr0: fileparts[yr0idx] = str(yr0) # change start year if yr1f != yr1: fileparts[yr1idx] = str(yr1) + '.nc4' # change end year cyr0 = climyears[cm][0]; cyr1 = climyears[cm][1] # check year range if yr0 > cyr0: errors += sim2 + 'YEARS: Start year > ' + str(cyr0) + '\n' if yr1 < cyr1: errors += sim2 + 'YEARS: End year < ' + str(cyr1) + '\n' except: changes += sim2 + 'UNITS: Change time units to "growing seasons since "<start_year>-01-01 00:00:00"\n' vfile = vlist[varfullidx] + '_' + fileparts[cropidx] if not vfile in fvars: errors += sim2 + 'VARIABLE NAME: Variable name in file is inconsistent with filename\n' else: nvars = 0; vfile = '' # try to find variable name for n in fvars: if not n in ['time', 'lat', 'latitude', 'lon', 'longitude', 'year']: nvars += 1 vfile = n if nvars > 1: vfile = '' # found more than one variable if vfile != '': v = ncf.variables[vfile] # check dimensions if not lat is None and not 'lat' in v.dimensions and not 'latitude' in v.dimensions: errors += sim2 + 'DIMENSION: Variable must be function of lat\n' if not lon is None and not 'lon' in v.dimensions and not 'longitude' in v.dimensions: errors += sim2 + 'DIMENSION: Variable must be function of lon\n' if not time is None and not 'time' in v.dimensions: errors += sim2 + 'DIMENSION: Variable must be function of time\n' # check fill value if not '_FillValue' in v.ncattrs(): changes += sim2 + 'FILL VALUE: Add fill value = 1e20 attribute to variable\n' elif v.getncattr('_FillValue') != float32(1e20): changes += sim2 + 'FILL VALUE: Change fill value to 1e20\n' # check units if not 'units' in v.ncattrs(): changes += sim2 + 'UNITS: Add units = "' + vunits[varfullidx] + '" to variable\n' elif v.units != vunits[varfullidx]: changes += sim2 + 'UNITS: Change units to "' + vunits[varfullidx] + '"\n' v = v[:] # convert to numpy array npts = v.size / 4 # divide by 4 to get rough approximation of land points if isnan(v).any(): # check if nan present errors += sim2 + 'NAN: Using NaN instead of fill value\n' v = masked_where(isnan(v), v) # check if masked array if not isMaskedArray(v): fatal += sim2 + '\n' continue # check percent unmasked punmasked = 100. * (v.size - v.mask.sum()) / npts if punmasked < 10.: errors += sim2 + 'COVERAGE: Spatial coverage is less than 10%\n' # check negative values if vlist[varfullidx] != 'sumt': nneg = (v < 0.).sum() if nneg: pneg = 100. 
* nneg / npts pneg = '{:.2f}'.format(pneg) errors += sim2 + 'NEGATIVE: ' + pneg + '% of values are negative\n' # check range plower = 100. * (v < vranges[varfullidx][0]).sum() / npts if plower > 0.1: plower = '{:.2f}'.format(plower) errors += sim2 + 'RANGE: ' + plower + '% of values < ' + str(vranges[varfullidx][0]) + '\n' phigher = 100. * logical_and(v > vranges[varfullidx][1], v != 1e20).sum() / npts if phigher > 0.1: phigher = '{:.2f}'.format(phigher) errors += sim2 + 'RANGE: ' + phigher + '% of values > ' + str(vranges[varfullidx][1]) + '\n' # maximum and minimum values vmax = v.max() if isMaskedArray(vmax): vmax = v.fill_value info_datmat[1] = '{:<10.3f}'.format(vmax) if len(info_datmat[1]) > 9: info_datmat[1] = '{:<10.3e}'.format(vmax) vmin = v.min() if isMaskedArray(vmin): vmin = v.fill_value info_datmat[2] = '{:<10.3f}'.format(vmin) if len(info_datmat[2]) > 9: info_datmat[2] = '{:<10.3e}'.format(vmin) # number of points within rang info_datmat[3] = str(logical_and(v >= vranges[varfullidx][0], v <= vranges[varfullidx][1]).sum() / len(time)) # variable name info_datmat[4] = vfile ncf.close() newname = '_'.join(fileparts) if newname != f: changes += sim2 + 'FILENAME: Change to ' + newname + '\n' info += '{:90s}{:15s}{:15s}{:18s}{:15s}{:14s}{:8s}\n'.format(info_datmat[0], info_datmat[1], \ info_datmat[2], info_datmat[3], info_datmat[4], info_datmat[5], info_datmat[6]) # mark missing data cm, cp, vr, sc = where(msg_datmat) for i in range(len(cm)): msg_data += '{:15s}{:15s}{:15s}{:20s}{:15s}\n'.format(d, climmodels[cm[i]], crops[cp[i]], scenarios[sc[i]], vlist[vr[i]]) sumfilename = options.sumdir + sep + 'report_' + d + '.txt' sumfile = open(sumfilename, 'w') sumfile.write('!!! Summary produced by ggcmi.QC.py !!!\n') sumfile.write('!!! for model ' + d + ' !!!\n\n') header = '{:15s}{:15s}{:15s}{:20s}{:8s}'.format('Model', 'Climate', 'Crop', 'Scenario', 'Variable') sumfile.write('FATAL ERRORS\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if fatal != '': sumfile.write(fatal + '\n') else: sumfile.write('None\n\n') header = '{:15s}{:15s}{:15s}{:20s}{:15s}{:s}'.format('Model', 'Climate', 'Crop', 'Scenario', 'Variable', 'Error') sumfile.write('DATA QUALITY WARNINGS\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if errors != '': sumfile.write(errors + '\n') else: sumfile.write('None\n\n') header = '{:15s}{:15s}{:15s}{:20s}{:15s}{:s}'.format('Model', 'Climate', 'Crop', 'Scenario', 'Variable', 'Change') sumfile.write('METADATA ISSUES\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if changes != '': sumfile.write(changes + '\n') else: sumfile.write('None\n\n') header = '{:15s}{:15s}{:s}'.format('Model', 'Climate', 'Crop') sumfile.write('MISSING SIMULATIONS\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if msg_sims != '': sumfile.write(msg_sims + '\n') else: sumfile.write('None\n\n') header = '{:90s}{:15}{:15}{:18}{:15}{:14}{:8}'.format('File', 'MaxVal', 'MinVal', 'GridcellsInRange', 'VarName', 'YearRange', 'NumYears') sumfile.write('BASIC INFO\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if info != '': sumfile.write(info + '\n') else: sumfile.write('None\n\n') header = '{:15s}{:15s}{:15s}{:20s}{:s}'.format('Model', 'Climate', 'Crop', 'Scenario', 'Variable') sumfile.write('MISSING FILES\n\n') sumfile.write(header + '\n') sumfile.write('=' * len(header) + '\n') if msg_data != '': sumfile.write(msg_data[: -1]) else: sumfile.write('None') sumfile.close()
agpl-3.0
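The QC script above applies a long series of per-file checks inside one loop. As a compact, hypothetical illustration (the file path and variable name below are placeholders), this sketch reproduces two of those checks with the same netCDF4/numpy tools: latitude ordering and range, and the 1e20 fill value.

import numpy as np
from netCDF4 import Dataset

def check_lat_and_fill(path, varname):
    """Return QC messages for two of the checks performed in the script above."""
    issues = []
    with Dataset(path) as ncf:
        if 'lat' in ncf.variables:
            lat = ncf.variables['lat'][:]
            if np.any(np.diff(lat) > 0):
                issues.append('VALUES: change latitudes to descending order')
            if abs(lat[0] - 90) > 1. or abs(lat[-1] + 90) > 1.:
                issues.append('RANGE: change latitude range to [90, -90]')
        var = ncf.variables[varname]
        if '_FillValue' not in var.ncattrs():
            issues.append('FILL VALUE: add fill value = 1e20 attribute to variable')
        elif var.getncattr('_FillValue') != np.float32(1e20):
            issues.append('FILL VALUE: change fill value to 1e20')
    return issues

# e.g. check_lat_and_fill('example_output.nc4', 'yield_mai')  # placeholder names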
quantopian/zipline
zipline/data/hdf5_daily_bars.py
1
34012
""" HDF5 Pricing File Format ------------------------ At the top level, the file is keyed by country (to support regional files containing multiple countries). Within each country, there are 4 subgroups: ``/data`` ^^^^^^^^^ Each field (OHLCV) is stored in a dataset as a 2D array, with a row per sid and a column per session. This differs from the more standard orientation of dates x sids, because it allows each compressed block to contain contiguous values for the same sid, which allows for better compression. .. code-block:: none /data /open /high /low /close /volume ``/index`` ^^^^^^^^^^ Contains two datasets, the index of sids (aligned to the rows of the OHLCV 2D arrays) and index of sessions (aligned to the columns of the OHLCV 2D arrays) to use for lookups. .. code-block:: none /index /sid /day ``/lifetimes`` ^^^^^^^^^^^^^^ Contains two datasets, start_date and end_date, defining the lifetime for each asset, aligned to the sids index. .. code-block:: none /lifetimes /start_date /end_date ``/currency`` ^^^^^^^^^^^^^ Contains a single dataset, ``code``, aligned to the sids index, which contains the listing currency of each sid. Example ^^^^^^^ Sample layout of the full file with multiple countries. .. code-block:: none |- /US | |- /data | | |- /open | | |- /high | | |- /low | | |- /close | | |- /volume | | | |- /index | | |- /sid | | |- /day | | | |- /lifetimes | | |- /start_date | | |- /end_date | | | |- /currency | |- /code | |- /CA |- /data | |- /open | |- /high | |- /low | |- /close | |- /volume | |- /index | |- /sid | |- /day | |- /lifetimes | |- /start_date | |- /end_date | |- /currency |- /code """ from functools import partial import h5py import logbook import numpy as np import pandas as pd from six import iteritems, raise_from, viewkeys from six.moves import reduce from zipline.data.bar_reader import ( NoDataAfterDate, NoDataBeforeDate, NoDataForSid, NoDataOnDate, ) from zipline.data.session_bars import CurrencyAwareSessionBarReader from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array from zipline.utils.pandas_utils import check_indexes_all_same log = logbook.Logger('HDF5DailyBars') VERSION = 0 DATA = 'data' INDEX = 'index' LIFETIMES = 'lifetimes' CURRENCY = 'currency' CODE = 'code' SCALING_FACTOR = 'scaling_factor' OPEN = 'open' HIGH = 'high' LOW = 'low' CLOSE = 'close' VOLUME = 'volume' FIELDS = (OPEN, HIGH, LOW, CLOSE, VOLUME) DAY = 'day' SID = 'sid' START_DATE = 'start_date' END_DATE = 'end_date' # XXX is reserved for "transactions involving no currency". MISSING_CURRENCY = 'XXX' DEFAULT_SCALING_FACTORS = { # Retain 3 decimal places for prices. OPEN: 1000, HIGH: 1000, LOW: 1000, CLOSE: 1000, # Volume is expected to be a whole integer. VOLUME: 1, } def coerce_to_uint32(a, scaling_factor): """ Returns a copy of the array as uint32, applying a scaling factor to maintain precision if supplied. """ return (a * scaling_factor).round().astype('uint32') def days_and_sids_for_frames(frames): """ Returns the date index and sid columns shared by a list of dataframes, ensuring they all match. Parameters ---------- frames : list[pd.DataFrame] A list of dataframes indexed by day, with a column per sid. Returns ------- days : np.array[datetime64[ns]] The days in these dataframes. sids : np.array[int64] The sids in these dataframes. Raises ------ ValueError If the dataframes passed are not all indexed by the same days and sids. 
""" if not frames: days = np.array([], dtype='datetime64[ns]') sids = np.array([], dtype='int64') return days, sids # Ensure the indices and columns all match. check_indexes_all_same( [frame.index for frame in frames], message='Frames have mismatched days.', ) check_indexes_all_same( [frame.columns for frame in frames], message='Frames have mismatched sids.', ) return frames[0].index.values, frames[0].columns.values class HDF5DailyBarWriter(object): """ Class capable of writing daily OHLCV data to disk in a format that can be read efficiently by HDF5DailyBarReader. Parameters ---------- filename : str The location at which we should write our output. date_chunk_size : int The number of days per chunk in the HDF5 file. If this is greater than the number of days in the data, the chunksize will match the actual number of days. See Also -------- zipline.data.hdf5_daily_bars.HDF5DailyBarReader """ def __init__(self, filename, date_chunk_size): self._filename = filename self._date_chunk_size = date_chunk_size def h5_file(self, mode): return h5py.File(self._filename, mode) def write(self, country_code, frames, currency_codes=None, scaling_factors=None): """ Write the OHLCV data for one country to the HDF5 file. Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid. The dataframes need to have the same index and columns. currency_codes : pd.Series, optional Series mapping sids to 3-digit currency code values for those sids' listing currencies. If not passed, missing currencies will be written. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """ if scaling_factors is None: scaling_factors = DEFAULT_SCALING_FACTORS # Note that this functions validates that all of the frames # share the same days and sids. days, sids = days_and_sids_for_frames(list(frames.values())) # XXX: We should make this required once we're using it everywhere. if currency_codes is None: currency_codes = pd.Series(index=sids, data=MISSING_CURRENCY) # Currency codes should match dataframe columns. check_sids_arrays_match( sids, currency_codes.index.values, message="currency_codes sids do not match data sids:", ) # Write start and end dates for each sid. start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames) if len(sids): chunks = (len(sids), min(self._date_chunk_size, len(days))) else: # h5py crashes if we provide chunks for empty data. chunks = None with self.h5_file(mode='a') as h5_file: # ensure that the file version has been written h5_file.attrs['version'] = VERSION country_group = h5_file.create_group(country_code) self._write_index_group(country_group, days, sids) self._write_lifetimes_group( country_group, start_date_ixs, end_date_ixs, ) self._write_currency_group(country_group, currency_codes) self._write_data_group( country_group, frames, scaling_factors, chunks, ) def write_from_sid_df_pairs(self, country_code, data, currency_codes=None, scaling_factors=None): """ Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. 
data : iterable[tuple[int, pandas.DataFrame]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. currency_codes : pd.Series, optional Series mapping sids to 3-digit currency code values for those sids' listing currencies. If not passed, missing currencies will be written. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """ data = list(data) if not data: empty_frame = pd.DataFrame( data=None, index=np.array([], dtype='datetime64[ns]'), columns=np.array([], dtype='int64'), ) return self.write( country_code, {f: empty_frame.copy() for f in FIELDS}, scaling_factors, ) sids, frames = zip(*data) ohlcv_frame = pd.concat(frames) # Repeat each sid for each row in its corresponding frame. sid_ix = np.repeat(sids, [len(f) for f in frames]) # Add id to the index, so the frame is indexed by (date, id). ohlcv_frame.set_index(sid_ix, append=True, inplace=True) frames = { field: ohlcv_frame[field].unstack() for field in FIELDS } return self.write( country_code=country_code, frames=frames, scaling_factors=scaling_factors, currency_codes=currency_codes ) def _write_index_group(self, country_group, days, sids): """Write /country/index. """ index_group = country_group.create_group(INDEX) self._log_writing_dataset(index_group) index_group.create_dataset(SID, data=sids) # h5py does not support datetimes, so they need to be stored # as integers. index_group.create_dataset(DAY, data=days.astype(np.int64)) def _write_lifetimes_group(self, country_group, start_date_ixs, end_date_ixs): """Write /country/lifetimes """ lifetimes_group = country_group.create_group(LIFETIMES) self._log_writing_dataset(lifetimes_group) lifetimes_group.create_dataset(START_DATE, data=start_date_ixs) lifetimes_group.create_dataset(END_DATE, data=end_date_ixs) def _write_currency_group(self, country_group, currencies): """Write /country/currency """ currency_group = country_group.create_group(CURRENCY) self._log_writing_dataset(currency_group) currency_group.create_dataset( CODE, data=currencies.values.astype(dtype='S3'), ) def _write_data_group(self, country_group, frames, scaling_factors, chunks): """Write /country/data """ data_group = country_group.create_group(DATA) self._log_writing_dataset(data_group) for field in FIELDS: frame = frames[field] # Sort rows by increasing sid, and columns by increasing date. frame.sort_index(inplace=True) frame.sort_index(axis='columns', inplace=True) data = coerce_to_uint32( frame.T.fillna(0).values, scaling_factors[field], ) dataset = data_group.create_dataset( field, compression='lzf', shuffle=True, data=data, chunks=chunks, ) self._log_writing_dataset(dataset) dataset.attrs[SCALING_FACTOR] = scaling_factors[field] log.debug( 'Writing dataset {} to file {}', dataset.name, self._filename ) def _log_writing_dataset(self, dataset): log.debug("Writing {} to file {}", dataset.name, self._filename) def compute_asset_lifetimes(frames): """ Parameters ---------- frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid, as passed to write(). 
Returns ------- start_date_ixs : np.array[int64] The index of the first date with non-nan values, for each sid. end_date_ixs : np.array[int64] The index of the last date with non-nan values, for each sid. """ # Build a 2D array (dates x sids), where an entry is True if all # fields are nan for the given day and sid. is_null_matrix = np.logical_and.reduce( [frames[field].isnull().values for field in FIELDS], ) if not is_null_matrix.size: empty = np.array([], dtype='int64') return empty, empty.copy() # Offset of the first null from the start of the input. start_date_ixs = is_null_matrix.argmin(axis=0) # Offset of the last null from the **end** of the input. end_offsets = is_null_matrix[::-1].argmin(axis=0) # Offset of the last null from the start of the input end_date_ixs = is_null_matrix.shape[0] - end_offsets - 1 return start_date_ixs, end_date_ixs def convert_price_with_scaling_factor(a, scaling_factor): conversion_factor = (1.0 / scaling_factor) zeroes = (a == 0) return np.where(zeroes, np.nan, a.astype('float64')) * conversion_factor class HDF5DailyBarReader(CurrencyAwareSessionBarReader): """ Parameters --------- country_group : h5py.Group The group for a single country in an HDF5 daily pricing file. """ def __init__(self, country_group): self._country_group = country_group self._postprocessors = { OPEN: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(OPEN)), HIGH: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(HIGH)), LOW: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(LOW)), CLOSE: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(CLOSE)), VOLUME: lambda a: a, } @classmethod def from_file(cls, h5_file, country_code): """ Construct from an h5py.File and a country code. Parameters ---------- h5_file : h5py.File An HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """ if h5_file.attrs['version'] != VERSION: raise ValueError( 'mismatched version: file is of version %s, expected %s' % ( h5_file.attrs['version'], VERSION, ), ) return cls(h5_file[country_code]) @classmethod def from_path(cls, path, country_code): """ Construct from a file path and a country code. Parameters ---------- path : str The path to an HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """ return cls.from_file(h5py.File(path), country_code) def _read_scaling_factor(self, field): return self._country_group[DATA][field].attrs[SCALING_FACTOR] def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ self._validate_timestamp(start_date) self._validate_timestamp(end_date) start = start_date.asm8 end = end_date.asm8 date_slice = self._compute_date_range_slice(start, end) n_dates = date_slice.stop - date_slice.start # Create a buffer into which we'll read data from the h5 file. # Allocate an extra row of space that will always contain null values. 
# We'll use that space to provide "data" for entries in ``assets`` that # are unknown to us. full_buf = np.zeros((len(self.sids) + 1, n_dates), dtype=np.uint32) # We'll only read values into this portion of the read buf. mutable_buf = full_buf[:-1] # Indexer that converts an array aligned to self.sids (which is what we # pull from the h5 file) into an array aligned to ``assets``. # # Unknown assets will have an index of -1, which means they'll always # pull from the last row of the read buffer. We allocated an extra # empty row above so that these lookups will cause us to fill our # output buffer with "null" values. sid_selector = self._make_sid_selector(assets) out = [] for column in columns: # Zero the buffer to prepare to receive new data. mutable_buf.fill(0) dataset = self._country_group[DATA][column] # Fill the mutable portion of our buffer with data from the file. dataset.read_direct( mutable_buf, np.s_[:, date_slice], ) # Select data from the **full buffer**. Unknown assets will pull # from the last row, which is always empty. out.append(self._postprocessors[column](full_buf[sid_selector].T)) return out def _make_sid_selector(self, assets): """ Build an indexer mapping ``self.sids`` to ``assets``. Parameters ---------- assets : list[int] List of assets requested by a caller of ``load_raw_arrays``. Returns ------- index : np.array[int64] Index array containing the index in ``self.sids`` for each location in ``assets``. Entries in ``assets`` for which we don't have a sid will contain -1. It is caller's responsibility to handle these values correctly. """ assets = np.array(assets) sid_selector = self.sids.searchsorted(assets) unknown = np.in1d(assets, self.sids, invert=True) sid_selector[unknown] = -1 return sid_selector def _compute_date_range_slice(self, start_date, end_date): # Get the index of the start of dates for ``start_date``. start_ix = self.dates.searchsorted(start_date) # Get the index of the start of the first date **after** end_date. end_ix = self.dates.searchsorted(end_date, side='right') return slice(start_ix, end_ix) def _validate_assets(self, assets): """Validate that asset identifiers are contained in the daily bars. Parameters ---------- assets : array-like[int] The asset identifiers to validate. Raises ------ NoDataForSid If one or more of the provided asset identifiers are not contained in the daily bars. """ missing_sids = np.setdiff1d(assets, self.sids) if len(missing_sids): raise NoDataForSid( 'Assets not contained in daily pricing file: {}'.format( missing_sids ) ) def _validate_timestamp(self, ts): if ts.asm8 not in self.dates: raise NoDataOnDate(ts) @lazyval def dates(self): return self._country_group[INDEX][DAY][:].astype('datetime64[ns]') @lazyval def sids(self): return self._country_group[INDEX][SID][:].astype('int64', copy=False) @lazyval def asset_start_dates(self): return self.dates[self._country_group[LIFETIMES][START_DATE][:]] @lazyval def asset_end_dates(self): return self.dates[self._country_group[LIFETIMES][END_DATE][:]] @lazyval def _currency_codes(self): bytes_array = self._country_group[CURRENCY][CODE][:] return bytes_array_to_native_str_object_array(bytes_array) def currency_codes(self, sids): """Get currencies in which prices are quoted for the requested sids. Parameters ---------- sids : np.array[int64] Array of sids for which currencies are needed. Returns ------- currency_codes : np.array[object] Array of currency codes for listing currencies of ``sids``. """ # Find the index of requested sids in our stored sids. 
ixs = self.sids.searchsorted(sids, side='left') result = self._currency_codes[ixs] # searchsorted returns the index of the next lowest sid if the lookup # fails. Fill these sids with the special "missing" sentinel. not_found = (self.sids[ixs] != sids) result[not_found] = None return result @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ return pd.Timestamp(self.dates[-1], tz='UTC') @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ raise NotImplementedError( 'HDF5 pricing does not yet support trading calendars.' ) @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return pd.Timestamp(self.dates[0], tz='UTC') @lazyval def sessions(self): """ Returns ------- sessions : DatetimeIndex All session labels (unioning the range for all assets) which the reader can provide. """ return pd.to_datetime(self.dates, utc=True) def get_value(self, sid, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. """ self._validate_assets([sid]) self._validate_timestamp(dt) sid_ix = self.sids.searchsorted(sid) dt_ix = self.dates.searchsorted(dt.asm8) value = self._postprocessors[field]( self._country_group[DATA][field][sid_ix, dt_ix] ) # When the value is nan, this dt may be outside the asset's lifetime. # If that's the case, the proper NoDataOnDate exception is raised. # Otherwise (when there's just a hole in the middle of the data), the # nan is returned. if np.isnan(value): if dt.asm8 < self.asset_start_dates[sid_ix]: raise NoDataBeforeDate() if dt.asm8 > self.asset_end_dates[sid_ix]: raise NoDataAfterDate() return value def get_last_traded_dt(self, asset, dt): """ Get the latest day on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded day. dt : pd.Timestamp The dt at which to start searching for the last traded day. Returns ------- last_traded : pd.Timestamp The day of the last trade for the given asset, using the input dt as a vantage point. """ sid_ix = self.sids.searchsorted(asset.sid) # Used to get a slice of all dates up to and including ``dt``. dt_limit_ix = self.dates.searchsorted(dt.asm8, side='right') # Get the indices of all dates with nonzero volume. nonzero_volume_ixs = np.ravel( np.nonzero(self._country_group[DATA][VOLUME][sid_ix, :dt_limit_ix]) ) if len(nonzero_volume_ixs) == 0: return pd.NaT return pd.Timestamp(self.dates[nonzero_volume_ixs][-1], tz='UTC') class MultiCountryDailyBarReader(CurrencyAwareSessionBarReader): """ Parameters --------- readers : dict[str -> SessionBarReader] A dict mapping country codes to SessionBarReader instances to service each country. 
""" def __init__(self, readers): self._readers = readers self._country_map = pd.concat([ pd.Series(index=reader.sids, data=country_code) for country_code, reader in iteritems(readers) ]) @classmethod def from_file(cls, h5_file): """ Construct from an h5py.File. Parameters ---------- h5_file : h5py.File An HDF5 daily pricing file. """ return cls({ country: HDF5DailyBarReader.from_file(h5_file, country) for country in h5_file.keys() }) @classmethod def from_path(cls, path): """ Construct from a file path. Parameters ---------- path : str Path to an HDF5 daily pricing file. """ return cls.from_file(h5py.File(path)) @property def countries(self): """A set-like object of the country codes supplied by this reader. """ return viewkeys(self._readers) def _country_code_for_assets(self, assets): country_codes = self._country_map.get(assets) # In some versions of pandas (observed in 0.22), Series.get() # returns None if none of the labels are in the index. if country_codes is not None: unique_country_codes = country_codes.dropna().unique() num_countries = len(unique_country_codes) else: num_countries = 0 if num_countries == 0: raise ValueError('At least one valid asset id is required.') elif num_countries > 1: raise NotImplementedError( ( 'Assets were requested from multiple countries ({}),' ' but multi-country reads are not yet supported.' ).format(list(unique_country_codes)) ) return np.asscalar(unique_country_codes) def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ country_code = self._country_code_for_assets(assets) return self._readers[country_code].load_raw_arrays( columns, start_date, end_date, assets, ) @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ return max( reader.last_available_dt for reader in self._readers.values() ) @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ raise NotImplementedError( 'HDF5 pricing does not yet support trading calendars.' ) @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return min( reader.first_trading_day for reader in self._readers.values() ) @property def sessions(self): """ Returns ------- sessions : DatetimeIndex All session labels (unioning the range for all assets) which the reader can provide. """ return pd.to_datetime( reduce( np.union1d, (reader.dates for reader in self._readers.values()), ), utc=True, ) def get_value(self, sid, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. 
Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. NoDataForSid If the given sid is not valid. """ try: country_code = self._country_code_for_assets([sid]) except ValueError as exc: raise_from( NoDataForSid( 'Asset not contained in daily pricing file: {}'.format(sid) ), exc ) return self._readers[country_code].get_value(sid, dt, field) def get_last_traded_dt(self, asset, dt): """ Get the latest day on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded day. dt : pd.Timestamp The dt at which to start searching for the last traded day. Returns ------- last_traded : pd.Timestamp The day of the last trade for the given asset, using the input dt as a vantage point. """ country_code = self._country_code_for_assets([asset.sid]) return self._readers[country_code].get_last_traded_dt(asset, dt) def currency_codes(self, sids): """Get currencies in which prices are quoted for the requested sids. Assumes that a sid's prices are always quoted in a single currency. Parameters ---------- sids : np.array[int64] Array of sids for which currencies are needed. Returns ------- currency_codes : np.array[S3] Array of currency codes for listing currencies of ``sids``. """ country_code = self._country_code_for_assets(sids) return self._readers[country_code].currency_codes(sids) def check_sids_arrays_match(left, right, message): """Check that two 1d arrays of sids are equal """ if len(left) != len(right): raise ValueError( "{}:\nlen(left) ({}) != len(right) ({})".format( message, len(left), len(right) ) ) diff = (left != right) if diff.any(): (bad_locs,) = np.where(diff) raise ValueError( "{}:\n Indices with differences: {}".format(message, bad_locs) )
apache-2.0
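The writer and reader above round-trip OHLCV frames through the country-keyed HDF5 layout described in the module docstring. The sketch below is a minimal, hypothetical round trip: the file name, country code, sids and sessions are invented, and only calls defined in this module (HDF5DailyBarWriter.write, HDF5DailyBarReader.from_path, load_raw_arrays) are used.

import numpy as np
import pandas as pd
from zipline.data.hdf5_daily_bars import (
    FIELDS, HDF5DailyBarReader, HDF5DailyBarWriter,
)

sessions = pd.date_range('2018-01-02', periods=5)      # becomes /US/index/day
sids = np.array([1, 2, 3], dtype='int64')              # becomes /US/index/sid
rng = np.random.RandomState(0)
frames = {                                             # one dates x sids frame per OHLCV field
    field: pd.DataFrame(rng.rand(len(sessions), len(sids)) * 100,
                        index=sessions, columns=sids)
    for field in FIELDS
}

writer = HDF5DailyBarWriter('daily_bars.h5', date_chunk_size=30)
writer.write('US', frames)                             # currencies default to the XXX sentinel

reader = HDF5DailyBarReader.from_path('daily_bars.h5', 'US')
closes, = reader.load_raw_arrays(['close'], sessions[0], sessions[-1], [1, 2])
print(closes.shape)                                    # (n_sessions, n_assets) of float64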
arabenjamin/scikit-learn
sklearn/neighbors/approximate.py
127
22351
"""Approximate nearest neighbor search""" # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # Joel Nothman <joel.nothman@gmail.com> import numpy as np import warnings from scipy import sparse from .base import KNeighborsMixin, RadiusNeighborsMixin from ..base import BaseEstimator from ..utils.validation import check_array from ..utils import check_random_state from ..metrics.pairwise import pairwise_distances from ..random_projection import GaussianRandomProjection __all__ = ["LSHForest"] HASH_DTYPE = '>u4' MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8 def _find_matching_indices(tree, bin_X, left_mask, right_mask): """Finds indices in sorted array of integers. Most significant h bits in the binary representations of the integers are matched with the items' most significant h bits. """ left_index = np.searchsorted(tree, bin_X & left_mask) right_index = np.searchsorted(tree, bin_X | right_mask, side='right') return left_index, right_index def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks): """Find the longest prefix match in tree for each query in bin_X Most significant bits are considered as the prefix. """ hi = np.empty_like(bin_X, dtype=np.intp) hi.fill(hash_size) lo = np.zeros_like(bin_X, dtype=np.intp) res = np.empty_like(bin_X, dtype=np.intp) left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi]) found = right_idx > left_idx res[found] = lo[found] = hash_size r = np.arange(bin_X.shape[0]) kept = r[lo < hi] # indices remaining in bin_X mask while kept.shape[0]: mid = (lo.take(kept) + hi.take(kept)) // 2 left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid]) found = right_idx > left_idx mid_found = mid[found] lo[kept[found]] = mid_found + 1 res[kept[found]] = mid_found hi[kept[~found]] = mid[~found] kept = r[lo < hi] return res class ProjectionToHashMixin(object): """Turn a transformed real-valued array into a hash""" @staticmethod def _to_hash(projected): if projected.shape[1] % 8 != 0: raise ValueError('Require reduced dimensionality to be a multiple ' 'of 8 for hashing') # XXX: perhaps non-copying operation better out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE) return out.reshape(projected.shape[0], -1) def fit_transform(self, X, y=None): self.fit(X) return self.transform(X) def transform(self, X, y=None): return self._to_hash(super(ProjectionToHashMixin, self).transform(X)) class GaussianRandomProjectionHash(ProjectionToHashMixin, GaussianRandomProjection): """Use GaussianRandomProjection to produce a cosine LSH fingerprint""" def __init__(self, n_components=8, random_state=None): super(GaussianRandomProjectionHash, self).__init__( n_components=n_components, random_state=random_state) def _array_of_arrays(list_of_arrays): """Creates an array of array from list of arrays.""" out = np.empty(len(list_of_arrays), dtype=object) out[:] = list_of_arrays return out class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin): """Performs approximate nearest neighbor search using LSH forest. LSH Forest: Locality Sensitive Hashing forest [1] is an alternative method for vanilla approximate nearest neighbor search methods. LSH forest data structure has been implemented using sorted arrays and binary search and 32 bit fixed-length hashes. Random projection is used as the hash family which approximates cosine distance. 
The cosine distance is defined as ``1 - cosine_similarity``: the lowest value is 0 (identical point) but it is bounded above by 2 for the farthest points. Its value does not depend on the norm of the vector points but only on their relative angles. Read more in the :ref:`User Guide <approximate_nearest_neighbors>`. Parameters ---------- n_estimators : int (default = 10) Number of trees in the LSH Forest. min_hash_match : int (default = 4) lowest hash length to be searched when candidate selection is performed for nearest neighbors. n_candidates : int (default = 10) Minimum number of candidates evaluated per estimator, assuming enough items meet the `min_hash_match` constraint. n_neighbors : int (default = 5) Number of neighbors to be returned from query function when it is not provided to the :meth:`kneighbors` method. radius : float, optinal (default = 1.0) Radius from the data point to its neighbors. This is the parameter space to use by default for the :meth`radius_neighbors` queries. radius_cutoff_ratio : float, optional (default = 0.9) A value ranges from 0 to 1. Radius neighbors will be searched until the ratio between total neighbors within the radius and the total candidates becomes less than this value unless it is terminated by hash length reaching `min_hash_match`. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- hash_functions_ : list of GaussianRandomProjectionHash objects Hash function g(p,x) for a tree is an array of 32 randomly generated float arrays with the same dimenstion as the data set. This array is stored in GaussianRandomProjectionHash object and can be obtained from ``components_`` attribute. trees_ : array, shape (n_estimators, n_samples) Each tree (corresponding to a hash function) contains an array of sorted hashed values. The array representation may change in future versions. original_indices_ : array, shape (n_estimators, n_samples) Original indices of sorted hashed values in the fitted index. References ---------- .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning Indexes for Similarity Search", WWW '05 Proceedings of the 14th international conference on World Wide Web, 651-660, 2005. Examples -------- >>> from sklearn.neighbors import LSHForest >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]] >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]] >>> lshf = LSHForest() >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10, n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9, random_state=None) >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2) >>> distances # doctest: +ELLIPSIS array([[ 0.069..., 0.149...], [ 0.229..., 0.481...], [ 0.004..., 0.014...]]) >>> indices array([[1, 2], [2, 0], [4, 0]]) """ def __init__(self, n_estimators=10, radius=1.0, n_candidates=50, n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9, random_state=None): self.n_estimators = n_estimators self.radius = radius self.random_state = random_state self.n_candidates = n_candidates self.n_neighbors = n_neighbors self.min_hash_match = min_hash_match self.radius_cutoff_ratio = radius_cutoff_ratio def _compute_distances(self, query, candidates): """Computes the cosine distance. 
Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances. """ if candidates.shape == (0,): # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse return np.empty(0, dtype=np.int), np.empty(0, dtype=float) if sparse.issparse(self._fit_X): candidate_X = self._fit_X[candidates] else: candidate_X = self._fit_X.take(candidates, axis=0, mode='clip') distances = pairwise_distances(query, candidate_X, metric='cosine')[0] distance_positions = np.argsort(distances) distances = distances.take(distance_positions, mode='clip', axis=0) return distance_positions, distances def _generate_masks(self): """Creates left and right masks for all hash lengths.""" tri_size = MAX_HASH_SIZE + 1 # Called once on fitting, output is independent of hashes left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:] right_mask = left_mask[::-1, ::-1] self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE) self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE) def _get_candidates(self, query, max_depth, bin_queries, n_neighbors): """Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances. """ index_size = self._fit_X.shape[0] # Number of candidates considered including duplicates # XXX: not sure whether this is being calculated correctly wrt # duplicates from different iterations through a single tree n_candidates = 0 candidate_set = set() min_candidates = self.n_candidates * self.n_estimators while (max_depth > self.min_hash_match and (n_candidates < min_candidates or len(candidate_set) < n_neighbors)): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) n_candidates += stop - start candidate_set.update( self.original_indices_[i][start:stop].tolist()) max_depth -= 1 candidates = np.fromiter(candidate_set, count=len(candidate_set), dtype=np.intp) # For insufficient candidates, candidates are filled. # Candidates are filled from unselected indices uniformly. if candidates.shape[0] < n_neighbors: warnings.warn( "Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (n_neighbors, self.min_hash_match)) remaining = np.setdiff1d(np.arange(0, index_size), candidates) to_fill = n_neighbors - candidates.shape[0] candidates = np.concatenate((candidates, remaining[:to_fill])) ranks, distances = self._compute_distances(query, candidates.astype(int)) return (candidates[ranks[:n_neighbors]], distances[:n_neighbors]) def _get_radius_neighbors(self, query, max_depth, bin_queries, radius): """Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances. 
""" ratio_within_radius = 1 threshold = 1 - self.radius_cutoff_ratio total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while (max_depth > self.min_hash_match and ratio_within_radius > threshold): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] candidates = [] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) candidates.extend( self.original_indices_[i][start:stop].tolist()) candidates = np.setdiff1d(candidates, total_candidates) total_candidates = np.append(total_candidates, candidates) ranks, distances = self._compute_distances(query, candidates) m = np.searchsorted(distances, radius, side='right') positions = np.searchsorted(total_distances, distances[:m]) total_neighbors = np.insert(total_neighbors, positions, candidates[ranks[:m]]) total_distances = np.insert(total_distances, positions, distances[:m]) ratio_within_radius = (total_neighbors.shape[0] / float(total_candidates.shape[0])) max_depth = max_depth - 1 return total_neighbors, total_distances def fit(self, X, y=None): """Fit the LSH forest on the data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self : object Returns self. """ self._fit_X = check_array(X, accept_sparse='csr') # Creates a g(p,x) for each tree self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): # This is g(p,x) for a particular tree. # Builds a single tree. Hashing is done on an array of data points. # `GaussianRandomProjection` is used for hashing. # `n_components=hash size and n_features=n_dim. hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE, rng.randint(0, int_max)) hashes = hasher.fit_transform(self._fit_X)[:, 0] original_index = np.argsort(hashes) bin_hashes = hashes[original_index] self.original_indices_.append(original_index) self.trees_.append(bin_hashes) self.hash_functions_.append(hasher) self._generate_masks() return self def _query(self, X): """Performs descending phase to find maximum depth.""" # Calculate hashes of shape (n_samples, n_estimators, [hash_size]) bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) # descend phase depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for tree, tree_queries in zip(self.trees_, np.rollaxis(bin_queries, 1))] return bin_queries, np.max(depths, axis=0) def kneighbors(self, X, n_neighbors=None, return_distance=True): """Returns n_neighbors of approximate nearest neighbors. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, opitonal (default = None) Number of neighbors required. If not provided, this will return the number specified at the initialization. 
return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples, n_neighbors) Array representing the cosine distances to each point, only present if return_distance=True. ind : array, shape (n_samples, n_neighbors) Indices of the approximate nearest points in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if n_neighbors is None: n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_candidates(X[i], max_depth[i], bin_queries[i], n_neighbors) neighbors.append(neighs) distances.append(dists) if return_distance: return np.array(distances), np.array(neighbors) else: return np.array(neighbors) def radius_neighbors(self, X, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Return the indices and distances of some points from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance to their query point. LSH Forest being an approximate method, some true neighbors from the indexed dataset might be missing from the results. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples,) of arrays Each element is an array representing the cosine distances to some points found within ``radius`` of the respective query. Only present if ``return_distance=True``. ind : array, shape (n_samples,) of arrays Each element is an array of indices for neighbors within ``radius`` of the respective query. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if radius is None: radius = self.radius X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_radius_neighbors(X[i], max_depth[i], bin_queries[i], radius) neighbors.append(neighs) distances.append(dists) if return_distance: return _array_of_arrays(distances), _array_of_arrays(neighbors) else: return _array_of_arrays(neighbors) def partial_fit(self, X, y=None): """ Inserts new data into the already fitted LSH Forest. Cost is proportional to new total size, so additions should be batched. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data point to be inserted into the LSH Forest. """ X = check_array(X, accept_sparse='csr') if not hasattr(self, 'hash_functions_'): return self.fit(X) if X.shape[1] != self._fit_X.shape[1]: raise ValueError("Number of features in X and" " fitted array does not match.") n_samples = X.shape[0] n_indexed = self._fit_X.shape[0] for i in range(self.n_estimators): bin_X = self.hash_functions_[i].transform(X)[:, 0] # gets the position to be added in the tree. positions = self.trees_[i].searchsorted(bin_X) # adds the hashed value into the tree. 
self.trees_[i] = np.insert(self.trees_[i], positions, bin_X) # add the entry into the original_indices_. self.original_indices_[i] = np.insert(self.original_indices_[i], positions, np.arange(n_indexed, n_indexed + n_samples)) # adds the entry into the input_array. if sparse.issparse(X) or sparse.issparse(self._fit_X): self._fit_X = sparse.vstack((self._fit_X, X)) else: self._fit_X = np.row_stack((self._fit_X, X)) return self
bsd-3-clause
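As a stand-alone illustration of the fingerprinting step used by GaussianRandomProjectionHash above (the sign of 32 random Gaussian projections, packed into a big-endian uint32), here is a small numpy sketch; the data, seed and projection matrix are arbitrary stand-ins for the fitted components.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(4, 16)                     # 4 points, 16 features
components = rng.randn(16, 32)           # stand-in for 32 random Gaussian projections

projected = X.dot(components)            # shape (4, 32)
bits = (projected > 0).astype(np.uint8)  # sign pattern approximates cosine similarity
hashes = np.packbits(bits, axis=1).view('>u4').ravel()  # one 32-bit hash per point
print(hashes)                            # four unsigned integers, one fingerprint each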
BjerknesClimateDataCentre/QuinCe
external_scripts/NRT/Saildrone_conversion/saildrone_module/check_request.py
2
2196
############################################################################### ### FUNCTIONS WHICH CHECKS THE NEXT REQUEST ### ############################################################################### ### Description: # Function which returns a list of what to request for download. Drones are # removed from the request list if they are on the ignore list, OR if any of # the following are not available: the drone itself, any of the dataset typs, # or the start date. #------------------------------------------------------------------------------ import pandas as pd def check_next_request(next_request, access_list, datasets, drones_ignored): next_request_checked = dict(next_request) for drone, start in next_request.items(): # Remove drone from the requst list if it is on the ignore list if drone in drones_ignored: del next_request_checked[drone] continue # Find what's available for the drone in question available = [dictionary for dictionary in access_list if str(dictionary['drone_id']) == drone] # Remove drone from next request if it is no longer available if not available: # !!! Send message to slack. Temp solution: print("Drone ", drone," no longer available.") del next_request_checked[drone] continue # Check if datasets we want are available (try-except is used here in # order to continue from the main for loop) try: for dataset in datasets: if dataset not in available[0]['data_set']: # !!! Send message to slack. Temp solution: print(dataset, " dataset not available for drone ", drone) del next_request_checked[drone] raise Exception() except Exception: continue # Check if the start we want to request is available. First convert to # dateformat. start_request = pd.to_datetime(start, format='%Y-%m-%dT%H:%M:%S.%fZ') start_available = pd.to_datetime(available[0]['start_date'], format='%Y-%m-%dT%H:%M:%S.%fZ') if start_request < start_available: #!!! Send message to slack. Temp solution print("Next start to request for ", drone, " is not available.") del next_request_checked[drone] continue return next_request_checked
gpl-3.0
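A hypothetical call to check_next_request, showing how a drone is kept only when it is not ignored, appears in the access list, offers the requested dataset types, and already covers the requested start date. The import path (assuming the package is importable as saildrone_module), drone ids, dataset names and dates below are invented for illustration.

from saildrone_module.check_request import check_next_request

next_request = {
    '1001': '2019-05-01T00:00:00.000Z',
    '1002': '2019-05-01T00:00:00.000Z',
}
access_list = [
    {'drone_id': 1001,
     'data_set': ['oceanographic', 'biogeochemical'],
     'start_date': '2019-04-01T00:00:00.000Z'},
    # drone 1002 is missing from the access list, so it will be dropped
]
datasets = ['oceanographic', 'biogeochemical']
drones_ignored = []

print(check_next_request(next_request, access_list, datasets, drones_ignored))
# expected: {'1001': '2019-05-01T00:00:00.000Z'}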
DonBeo/scikit-learn
sklearn/neighbors/tests/test_kde.py
13
5622
import numpy as np from sklearn.utils.testing import (assert_allclose, assert_raises, assert_equal) from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors from sklearn.neighbors.ball_tree import kernel_norm from sklearn.pipeline import make_pipeline from sklearn.datasets import make_blobs from sklearn.grid_search import GridSearchCV from sklearn.preprocessing import StandardScaler def compute_kernel_slow(Y, X, kernel, h): d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0] if kernel == 'gaussian': return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) elif kernel == 'tophat': return norm * (d < h).sum(-1) elif kernel == 'epanechnikov': return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) elif kernel == 'exponential': return norm * (np.exp(-d / h)).sum(-1) elif kernel == 'linear': return norm * ((1 - d / h) * (d < h)).sum(-1) elif kernel == 'cosine': return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) else: raise ValueError('kernel not recognized') def test_kernel_density(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) Y = rng.randn(n_samples, n_features) for kernel in ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']: for bandwidth in [0.01, 0.1, 1]: dens_true = compute_kernel_slow(Y, X, kernel, bandwidth) def check_results(kernel, bandwidth, atol, rtol): kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) log_dens = kde.fit(X).score_samples(Y) assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1E-7, rtol)) assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1E-7, rtol)) for rtol in [0, 1E-5]: for atol in [1E-6, 1E-2]: for breadth_first in (True, False): yield (check_results, kernel, bandwidth, atol, rtol) def test_kernel_density_sampling(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) bandwidth = 0.2 for kernel in ['gaussian', 'tophat']: # draw a tophat sample kde = KernelDensity(bandwidth, kernel=kernel).fit(X) samp = kde.sample(100) assert_equal(X.shape, samp.shape) # check that samples are in the right range nbrs = NearestNeighbors(n_neighbors=1).fit(X) dist, ind = nbrs.kneighbors(X, return_distance=True) if kernel == 'tophat': assert np.all(dist < bandwidth) elif kernel == 'gaussian': # 5 standard deviations is safe for 100 samples, but there's a # very small chance this test could fail. assert np.all(dist < 5 * bandwidth) # check unsupported kernels for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']: kde = KernelDensity(bandwidth, kernel=kernel).fit(X) assert_raises(NotImplementedError, kde.sample, 100) # non-regression test: used to return a scalar X = rng.randn(4, 1) kde = KernelDensity(kernel="gaussian").fit(X) assert_equal(kde.sample().shape, (1, 1)) def test_kde_algorithm_metric_choice(): # Smoke test for various metrics and algorithms rng = np.random.RandomState(0) X = rng.randn(10, 2) # 2 features required for haversine dist. 
Y = rng.randn(10, 2) for algorithm in ['auto', 'ball_tree', 'kd_tree']: for metric in ['euclidean', 'minkowski', 'manhattan', 'chebyshev', 'haversine']: if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics: assert_raises(ValueError, KernelDensity, algorithm=algorithm, metric=metric) else: kde = KernelDensity(algorithm=algorithm, metric=metric) kde.fit(X) y_dens = kde.score_samples(Y) assert_equal(y_dens.shape, Y.shape[:1]) def test_kde_score(n_samples=100, n_features=3): pass #FIXME #np.random.seed(0) #X = np.random.random((n_samples, n_features)) #Y = np.random.random((n_samples, n_features)) def test_kde_badargs(): assert_raises(ValueError, KernelDensity, algorithm='blah') assert_raises(ValueError, KernelDensity, bandwidth=0) assert_raises(ValueError, KernelDensity, kernel='blah') assert_raises(ValueError, KernelDensity, metric='blah') assert_raises(ValueError, KernelDensity, algorithm='kd_tree', metric='blah') def test_kde_pipeline_gridsearch(): # test that kde plays nice in pipelines and grid-searches X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False), KernelDensity(kernel="gaussian")) params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10]) search = GridSearchCV(pipe1, param_grid=params, cv=5) search.fit(X) assert_equal(search.best_params_['kerneldensity__bandwidth'], .1) if __name__ == '__main__': import nose nose.runmodule()
bsd-3-clause
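In the same spirit as test_kernel_density above, the following self-contained sketch checks KernelDensity's Gaussian estimate against the closed-form sum of normalized Gaussian kernels; the sample sizes and bandwidth are arbitrary.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.randn(100, 3)                                   # training sample
Y = rng.randn(20, 3)                                    # evaluation points
h = 0.5

d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))         # (20, 100) pairwise distances
norm = (2 * np.pi * h ** 2) ** (-X.shape[1] / 2.0) / X.shape[0]
dens_true = norm * np.exp(-0.5 * (d / h) ** 2).sum(-1)  # brute-force Gaussian KDE

kde = KernelDensity(kernel='gaussian', bandwidth=h).fit(X)
dens_est = np.exp(kde.score_samples(Y))
print(np.allclose(dens_est, dens_true))                 # expected: True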
ChadFulton/statsmodels
statsmodels/tsa/filters/hp_filter.py
4
3157
from __future__ import absolute_import from scipy import sparse from scipy.sparse.linalg import spsolve import numpy as np from ._utils import _maybe_get_pandas_wrapper def hpfilter(X, lamb=1600): """ Hodrick-Prescott filter Parameters ---------- X : array-like The 1d ndarray timeseries to filter of length (nobs,) or (nobs,1) lamb : float The Hodrick-Prescott smoothing parameter. A value of 1600 is suggested for quarterly data. Ravn and Uhlig suggest using a value of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly data. Returns ------- cycle : array The estimated cycle in the data given lamb. trend : array The estimated trend in the data given lamb. Examples --------- >>> import statsmodels.api as sm >>> import pandas as pd >>> dta = sm.datasets.macrodata.load_pandas().data >>> index = pd.DatetimeIndex(start='1959Q1', end='2009Q4', freq='Q') >>> dta.set_index(index, inplace=True) >>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600) >>> gdp_decomp = dta[['realgdp']] >>> gdp_decomp["cycle"] = cycle >>> gdp_decomp["trend"] = trend >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax, ... fontsize=16); >>> plt.show() .. plot:: plots/hpf_plot.py Notes ----- The HP filter removes a smooth trend, `T`, from the data `X`. by solving min sum((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2) T t Here we implemented the HP filter as a ridge-regression rule using scipy.sparse. In this sense, the solution can be written as T = inv(I - lamb*K'K)X where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix such that K[i,j] = 1 if i == j or i == j + 2 K[i,j] = -2 if i == j + 1 K[i,j] = 0 otherwise See Also -------- statsmodels.tsa.filters.bk_filter.bkfilter statsmodels.tsa.filters.cf_filter.cffilter statsmodels.tsa.seasonal.seasonal_decompose References ---------- Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An Empricial Investigation." `Carnegie Mellon University discussion paper no. 451`. Ravn, M.O and H. Uhlig. 2002. "Notes On Adjusted the Hodrick-Prescott Filter for the Frequency of Observations." `The Review of Economics and Statistics`, 84(2), 371-80. """ _pandas_wrapper = _maybe_get_pandas_wrapper(X) X = np.asarray(X, float) if X.ndim > 1: X = X.squeeze() nobs = len(X) I = sparse.eye(nobs,nobs) offsets = np.array([0,1,2]) data = np.repeat([[1.],[-2.],[1.]], nobs, axis=1) K = sparse.dia_matrix((data, offsets), shape=(nobs-2,nobs)) use_umfpack = True trend = spsolve(I+lamb*K.T.dot(K), X, use_umfpack=use_umfpack) cycle = X-trend if _pandas_wrapper is not None: return _pandas_wrapper(cycle), _pandas_wrapper(trend) return cycle, trend
bsd-3-clause
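The docstring above states the closed form T = inv(I + lamb*K'K)X, with K a second-difference matrix; the following is a small dense NumPy cross-check of that formula. It is an illustrative sketch only: the toy series, nobs, and the dense solve are assumptions, whereas the function above uses scipy.sparse for scalability.

# Dense re-derivation of the HP trend: T = inv(I + lamb*K'K) X.
import numpy as np

nobs, lamb = 50, 1600.0
rng = np.random.RandomState(0)
x = np.cumsum(rng.randn(nobs)) + 0.1 * np.arange(nobs)   # toy series

# K is (nobs-2) x nobs with rows [1, -2, 1]: row i yields the second
# difference T[i] - 2*T[i+1] + T[i+2].
K = np.zeros((nobs - 2, nobs))
for i in range(nobs - 2):
    K[i, i], K[i, i + 1], K[i, i + 2] = 1.0, -2.0, 1.0

trend = np.linalg.solve(np.eye(nobs) + lamb * K.T.dot(K), x)
cycle = x - trend
print(trend.shape, cycle.shape)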
tapomayukh/projects_in_python
sandbox_tapo/src/skin_related/BMED_8813_HAP/Features/single_feature/results/cross_validate_categories_BMED_8813_HAP_scaled_method_II_shape.py
1
4028
# Principal Component Analysis Code : from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud from pylab import * import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle from mvpa.clfs.knn import kNN from mvpa.datasets import Dataset from mvpa.clfs.transerror import TransferError from mvpa.misc.data_generators import normalFeatureDataset from mvpa.algorithms.cvtranserror import CrossValidatedTransferError from mvpa.datasets.splitters import NFoldSplitter import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data') from data import Fmat_original def pca(X): #get dimensions num_data,dim = X.shape #center data mean_X = X.mean(axis=1) M = (X-mean_X) # subtract the mean (along columns) Mcov = cov(M) ###### Sanity Check ###### i=0 n=0 while i < 41: j=0 while j < 90: if X[i,j] != X[i,j]: print X[i,j] print i,j n=n+1 j = j+1 i=i+1 print n ########################## print 'PCA - COV-Method used' val,vec = linalg.eig(Mcov) #return the projection matrix, the variance and the mean return vec,val,mean_X, M, Mcov if __name__ == '__main__': Fmat = Fmat_original[0:41,:] # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) print 'Total_Matrix_Shape:',m_tot,n_tot eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat) #print eigvec_total #print eigval_total #print mean_data_total m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total)) m_eigvec_total, n_eigvec_total = np.shape(eigvec_total) m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total)) print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used. 
perc_total = cumsum(eigval_total)/sum(eigval_total) # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure) W = eigvec_total[:,0:13] m_W, n_W = np.shape(W) print 'Reduced Dimension Eigenvector Shape:',m_W, n_W #Projected Data: Y = (W.T)*B m_Y, n_Y = np.shape(Y.T) print 'Transposed Projected Data Shape:', m_Y, n_Y #Using PYMVPA PCA_data = np.array(Y.T) PCA_label_1 = ['Edge-1']*30 + ['Surface']*30 + ['Edge-2']*30 PCA_chunk_1 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5 clf = kNN(k=2) terr = TransferError(clf) ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1) print ds1.samples.shape cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion']) error = cvterr(ds1) print error print cvterr.confusion.asstring(description=False) figure(1) cvterr.confusion.plot(numbers='True') # Variances figure(2) title('Variances of PCs') stem(range(len(perc_total)),perc_total,'--b') axis([-0.3,30.3,0,1.2]) grid('True') show()
mit
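The script above implements PCA by eigendecomposition of the covariance matrix and inspects the cumulative explained variance. Here is a compact sketch of that covariance-method PCA on synthetic data; the shapes and the number of retained components are arbitrary stand-ins for the real feature matrix.

# Covariance-method PCA sketch in plain NumPy (rows = features, cols = samples).
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(4, 10)

mean_X = X.mean(axis=1, keepdims=True)
M = X - mean_X                       # center each feature
C = np.cov(M)                        # 4 x 4 covariance matrix
eigval, eigvec = np.linalg.eigh(C)   # eigh: symmetric input, real output

# Sort by decreasing eigenvalue and compute cumulative explained variance.
order = np.argsort(eigval)[::-1]
eigval, eigvec = eigval[order], eigvec[:, order]
explained = np.cumsum(eigval) / eigval.sum()

W = eigvec[:, :2]                    # keep the top 2 components
Y = W.T.dot(M)                       # projected data, shape (2, 10)
print(explained, Y.shape)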
heli522/scikit-learn
benchmarks/bench_lasso.py
295
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
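The benchmark above times coordinate-descent Lasso against LassoLars on the same regression problem. A much smaller sketch of the same comparison, using current scikit-learn import paths; the problem size and alpha are arbitrary and far smaller than in the benchmark.

# Small Lasso vs. LassoLars timing sketch.
import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LassoLars

X, y = make_regression(n_samples=2000, n_features=200, n_informative=20,
                       noise=0.1, random_state=0)

for name, model in [('Lasso', Lasso(alpha=0.01, fit_intercept=False)),
                    ('LassoLars', LassoLars(alpha=0.01, fit_intercept=False))]:
    t0 = time.time()
    model.fit(X, y)
    print('%s: %.3fs, %d nonzero coefs'
          % (name, time.time() - t0, np.sum(model.coef_ != 0)))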
sidmitra/django_nonrel_testapp
django/contrib/gis/utils/geoip.py
316
14811
""" This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R) C API (http://www.maxmind.com/app/c). This is an alternative to the GPL licensed Python GeoIP interface provided by MaxMind. GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts. For IP-based geolocation, this module requires the GeoLite Country and City datasets, in binary format (CSV will not work!). The datasets may be downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/. Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples below for more details. TODO: Verify compatibility with Windows. Example: >>> from django.contrib.gis.utils import GeoIP >>> g = GeoIP() >>> g.country('google.com') {'country_code': 'US', 'country_name': 'United States'} >>> g.city('72.14.207.99') {'area_code': 650, 'city': 'Mountain View', 'country_code': 'US', 'country_code3': 'USA', 'country_name': 'United States', 'dma_code': 807, 'latitude': 37.419200897216797, 'longitude': -122.05740356445312, 'postal_code': '94043', 'region': 'CA'} >>> g.lat_lon('salon.com') (37.789798736572266, -122.39420318603516) >>> g.lon_lat('uh.edu') (-95.415199279785156, 29.77549934387207) >>> g.geos('24.124.1.80').wkt 'POINT (-95.2087020874023438 39.0392990112304688)' """ import os, re from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER from ctypes.util import find_library from django.conf import settings if not settings.configured: settings.configure() # Creating the settings dictionary with any settings, if needed. GEOIP_SETTINGS = dict((key, getattr(settings, key)) for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY') if hasattr(settings, key)) lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None) # GeoIP Exception class. class GeoIPException(Exception): pass # The shared library for the GeoIP C API. May be downloaded # from http://www.maxmind.com/download/geoip/api/c/ if lib_path: lib_name = None else: # TODO: Is this really the library name for Windows? lib_name = 'GeoIP' # Getting the path to the GeoIP library. if lib_name: lib_path = find_library(lib_name) if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). ' 'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name) lgeoip = CDLL(lib_path) # Regular expressions for recognizing IP addresses and the GeoIP # free database editions. ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$') free_regex = re.compile(r'^GEO-\d{3}FREE') lite_regex = re.compile(r'^GEO-\d{3}LITE') #### GeoIP C Structure definitions #### class GeoIPRecord(Structure): _fields_ = [('country_code', c_char_p), ('country_code3', c_char_p), ('country_name', c_char_p), ('region', c_char_p), ('city', c_char_p), ('postal_code', c_char_p), ('latitude', c_float), ('longitude', c_float), # TODO: In 1.4.6 this changed from `int dma_code;` to # `union {int metro_code; int dma_code;};`. Change # to a `ctypes.Union` in to accomodate in future when # pre-1.4.6 versions are no longer distributed. ('dma_code', c_int), ('area_code', c_int), # TODO: The following structure fields were added in 1.4.3 -- # uncomment these fields when sure previous versions are no # longer distributed by package maintainers. 
#('charset', c_int), #('continent_code', c_char_p), ] class GeoIPTag(Structure): pass #### ctypes function prototypes #### RECTYPE = POINTER(GeoIPRecord) DBTYPE = POINTER(GeoIPTag) # For retrieving records by name or address. def record_output(func): func.restype = RECTYPE return func rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr) rec_by_name = record_output(lgeoip.GeoIP_record_by_name) # For opening & closing GeoIP database files. geoip_open = lgeoip.GeoIP_open geoip_open.restype = DBTYPE geoip_close = lgeoip.GeoIP_delete geoip_close.argtypes = [DBTYPE] geoip_close.restype = None # String output routines. def string_output(func): func.restype = c_char_p return func geoip_dbinfo = string_output(lgeoip.GeoIP_database_info) cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr) cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name) cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr) cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name) #### GeoIP class #### class GeoIP(object): # The flags for GeoIP memory caching. # GEOIP_STANDARD - read database from filesystem, uses least memory. # # GEOIP_MEMORY_CACHE - load database into memory, faster performance # but uses more memory # # GEOIP_CHECK_CACHE - check for updated database. If database has been updated, # reload filehandle and/or memory cache. # # GEOIP_INDEX_CACHE - just cache # the most frequently accessed index portion of the database, resulting # in faster lookups than GEOIP_STANDARD, but less memory usage than # GEOIP_MEMORY_CACHE - useful for larger databases such as # GeoIP Organization and GeoIP City. Note, for GeoIP Country, Region # and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE # GEOIP_STANDARD = 0 GEOIP_MEMORY_CACHE = 1 GEOIP_CHECK_CACHE = 2 GEOIP_INDEX_CACHE = 4 cache_options = dict((opt, None) for opt in (0, 1, 2, 4)) _city_file = '' _country_file = '' # Initially, pointers to GeoIP file references are NULL. _city = None _country = None def __init__(self, path=None, cache=0, country=None, city=None): """ Initializes the GeoIP object, no parameters are required to use default settings. Keyword arguments may be passed in to customize the locations of the GeoIP data sets. * path: Base directory to where GeoIP data is located or the full path to where the city or country data files (*.dat) are located. Assumes that both the city and country data sets are located in this directory; overrides the GEOIP_PATH settings attribute. * cache: The cache settings when opening up the GeoIP datasets, and may be an integer in (0, 1, 2, 4) corresponding to the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE, and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings, respectively. Defaults to 0, meaning that the data is read from the disk. * country: The name of the GeoIP country data file. Defaults to 'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute. * city: The name of the GeoIP city data file. Defaults to 'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute. """ # Checking the given cache option. if cache in self.cache_options: self._cache = self.cache_options[cache] else: raise GeoIPException('Invalid caching option: %s' % cache) # Getting the GeoIP data path. 
if not path: path = GEOIP_SETTINGS.get('GEOIP_PATH', None) if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.') if not isinstance(path, basestring): raise TypeError('Invalid path type: %s' % type(path).__name__) if os.path.isdir(path): # Constructing the GeoIP database filenames using the settings # dictionary. If the database files for the GeoLite country # and/or city datasets exist, then try and open them. country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat')) if os.path.isfile(country_db): self._country = geoip_open(country_db, cache) self._country_file = country_db city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat')) if os.path.isfile(city_db): self._city = geoip_open(city_db, cache) self._city_file = city_db elif os.path.isfile(path): # Otherwise, some detective work will be needed to figure # out whether the given database path is for the GeoIP country # or city databases. ptr = geoip_open(path, cache) info = geoip_dbinfo(ptr) if lite_regex.match(info): # GeoLite City database detected. self._city = ptr self._city_file = path elif free_regex.match(info): # GeoIP Country database detected. self._country = ptr self._country_file = path else: raise GeoIPException('Unable to recognize database edition: %s' % info) else: raise GeoIPException('GeoIP path must be a valid file or directory.') def __del__(self): # Cleaning any GeoIP file handles lying around. if self._country: geoip_close(self._country) if self._city: geoip_close(self._city) def _check_query(self, query, country=False, city=False, city_or_country=False): "Helper routine for checking the query and database availability." # Making sure a string was passed in for the query. if not isinstance(query, basestring): raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__) # Extra checks for the existence of country and city databases. if city_or_country and not (self._country or self._city): raise GeoIPException('Invalid GeoIP country and city data files.') elif country and not self._country: raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file) elif city and not self._city: raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file) def city(self, query): """ Returns a dictionary of city information for the given IP address or Fully Qualified Domain Name (FQDN). Some information in the dictionary may be undefined (None). """ self._check_query(query, city=True) if ipregex.match(query): # If an IP address was passed in ptr = rec_by_addr(self._city, c_char_p(query)) else: # If a FQDN was passed in. ptr = rec_by_name(self._city, c_char_p(query)) # Checking the pointer to the C structure, if valid pull out elements # into a dicionary and return. if bool(ptr): record = ptr.contents return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_) else: return None def country_code(self, query): "Returns the country code for the given IP Address or FQDN." self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_code_by_addr(self._country, query) else: return cntry_code_by_name(self._country, query) else: return self.city(query)['country_code'] def country_name(self, query): "Returns the country name for the given IP Address or FQDN." 
self._check_query(query, city_or_country=True) if self._country: if ipregex.match(query): return cntry_name_by_addr(self._country, query) else: return cntry_name_by_name(self._country, query) else: return self.city(query)['country_name'] def country(self, query): """ Returns a dictonary with with the country code and name when given an IP address or a Fully Qualified Domain Name (FQDN). For example, both '24.124.1.80' and 'djangoproject.com' are valid parameters. """ # Returning the country code and name return {'country_code' : self.country_code(query), 'country_name' : self.country_name(query), } #### Coordinate retrieval routines #### def coords(self, query, ordering=('longitude', 'latitude')): cdict = self.city(query) if cdict is None: return None else: return tuple(cdict[o] for o in ordering) def lon_lat(self, query): "Returns a tuple of the (longitude, latitude) for the given query." return self.coords(query) def lat_lon(self, query): "Returns a tuple of the (latitude, longitude) for the given query." return self.coords(query, ('latitude', 'longitude')) def geos(self, query): "Returns a GEOS Point object for the given query." ll = self.lon_lat(query) if ll: from django.contrib.gis.geos import Point return Point(ll, srid=4326) else: return None #### GeoIP Database Information Routines #### def country_info(self): "Returns information about the GeoIP country database." if self._country is None: ci = 'No GeoIP Country data in "%s"' % self._country_file else: ci = geoip_dbinfo(self._country) return ci country_info = property(country_info) def city_info(self): "Retuns information about the GeoIP city database." if self._city is None: ci = 'No GeoIP City data in "%s"' % self._city_file else: ci = geoip_dbinfo(self._city) return ci city_info = property(city_info) def info(self): "Returns information about all GeoIP databases in use." return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info) info = property(info) #### Methods for compatibility w/the GeoIP-Python API. #### @classmethod def open(cls, full_path, cache): return GeoIP(full_path, cache) def _rec_by_arg(self, arg): if self._city: return self.city(arg) else: return self.country(arg) region_by_addr = city region_by_name = city record_by_addr = _rec_by_arg record_by_name = _rec_by_arg country_code_by_addr = country_code country_code_by_name = country_code country_name_by_addr = country_name country_name_by_name = country_name
bsd-3-clause
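The core pattern in the module above is declaring ctypes prototypes (restype/argtypes) for functions in a shared C library before calling them. A minimal sketch of the same pattern against the standard C math library follows; the library name is platform dependent, hence the find_library lookup and the guard.

# Minimal ctypes prototype sketch (platform-dependent library lookup).
from ctypes import CDLL, c_double
from ctypes.util import find_library

libm_path = find_library('m') or find_library('c')
if libm_path:
    libm = CDLL(libm_path)
    # Without a prototype ctypes assumes int arguments/returns; declaring
    # restype/argtypes makes the call safe for doubles.
    libm.cos.restype = c_double
    libm.cos.argtypes = [c_double]
    print(libm.cos(0.0))   # -> 1.0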
arabenjamin/scikit-learn
examples/cluster/plot_mean_shift.py
348
1793
""" ============================================= A demo of the mean-shift clustering algorithm ============================================= Reference: Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward feature space analysis". IEEE Transactions on Pattern Analysis and Machine Intelligence. 2002. pp. 603-619. """ print(__doc__) import numpy as np from sklearn.cluster import MeanShift, estimate_bandwidth from sklearn.datasets.samples_generator import make_blobs ############################################################################### # Generate sample data centers = [[1, 1], [-1, -1], [1, -1]] X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6) ############################################################################### # Compute clustering with MeanShift # The following bandwidth can be automatically detected using bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(X) labels = ms.labels_ cluster_centers = ms.cluster_centers_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) print("number of estimated clusters : %d" % n_clusters_) ############################################################################### # Plot result import matplotlib.pyplot as plt from itertools import cycle plt.figure(1) plt.clf() colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk') for k, col in zip(range(n_clusters_), colors): my_members = labels == k cluster_center = cluster_centers[k] plt.plot(X[my_members, 0], X[my_members, 1], col + '.') plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show()
bsd-3-clause
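The example above relies on MeanShift's internal iteration, which repeatedly replaces a point by the mean of its neighbours within the bandwidth. The following hand-rolled sketch of that flat-kernel update for a single seed point is illustrative only: the toy data and bandwidth are arbitrary, and the real estimator additionally bins seeds and merges nearby modes.

# Flat-kernel mean-shift iteration for one seed point (toy data).
import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(100, 2) + [1, 1], rng.randn(100, 2) + [-1, -1]])

bandwidth = 1.0
point = X[0].copy()
for _ in range(50):
    # Mean of all samples within `bandwidth` of the current point.
    within = np.linalg.norm(X - point, axis=1) < bandwidth
    new_point = X[within].mean(axis=0)
    if np.linalg.norm(new_point - point) < 1e-3:   # converged to a mode
        break
    point = new_point
print(point)   # should land near one of the two blob centres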
ChadFulton/statsmodels
statsmodels/tsa/vector_ar/tests/test_var_jmulti.py
1
24356
from __future__ import absolute_import, print_function import numpy as np from numpy.testing import assert_, assert_allclose, assert_raises import statsmodels.datasets.macrodata.data as macro from statsmodels.compat.python import range from statsmodels.tsa.vector_ar.tests.JMulTi_results.parse_jmulti_vecm_output \ import sublists from statsmodels.tsa.vector_ar.var_model import VAR from .JMulTi_results.parse_jmulti_var_output import dt_s_tup_to_string from .JMulTi_results.parse_jmulti_var_output import load_results_jmulti atol = 0.001 # absolute tolerance rtol = 0.01 # relative tolerance datasets = [] data = {} results_ref = {} results_sm = {} debug_mode = False dont_test_se_t_p = False deterministic_terms_list = ["nc", "c", "ct"] seasonal_list = [0, 4] dt_s_list = [(det, s) for det in deterministic_terms_list for s in seasonal_list] all_tests = ["coefs", "det", "Sigma_u", "log_like", "fc", "causality", "impulse-response", "lag order", "test normality", "whiteness", "exceptions"] to_test = all_tests #["coefs", "det", "Sigma_u", "log_like", "fc", "causality"] # all_tests def load_data(dataset, data_dict): dtset = dataset.load_pandas() variables = dataset.variable_names loaded = dtset.data[variables].astype(float).values data_dict[dataset] = loaded.reshape((-1, len(variables))) def reorder_jmultis_det_terms(jmulti_output, constant, seasons): """ In case of seasonal terms and a trend term we have to reorder them to make the outputs from JMulTi and statsmodels comparable. JMulTi's ordering is: [constant], [seasonal terms], [trend term] while in statsmodels it is: [constant], [trend term], [seasonal terms] Parameters ---------- jmulti_output : ndarray (neqs x number_of_deterministic_terms) constant : bool Indicates whether there is a constant term or not in jmulti_output. seasons : int Number of seasons in the model. That means there are seasons-1 columns for seasonal terms in jmulti_output Returns ------- reordered : ndarray (neqs x number_of_deterministic_terms) jmulti_output reordered such that the order of deterministic terms matches that of statsmodels. """ if seasons == 0: return jmulti_output constant = int(constant) const_column = jmulti_output[:, :constant] season_columns = jmulti_output[:, constant:constant+seasons-1].copy() trend_columns = jmulti_output[:, constant+seasons-1:].copy() return np.hstack((const_column, trend_columns, season_columns)) def generate_exog_from_season(seasons, endog_len): """ Translate seasons to exog matrix. Parameters ---------- seasons : int Number of seasons. endog_len : int Number of observations. Returns ------- exog : ndarray or None If seasonal deterministic terms exist, the corresponding exog-matrix is returned. Otherwise, None is returned. 
""" exog_stack = [] if seasons > 0: season_exog = np.zeros((seasons - 1, endog_len)) for i in range(seasons - 1): season_exog[i, i::seasons] = 1 # season_exog = season_exog[:, ::-1] # season_exog = np.hstack((season_exog[:, 3:4], # season_exog[:, :-1])) # season_exog = np.hstack((season_exog[:, 2:4], # season_exog[:, :-2])) # season_exog = np.hstack((season_exog[:, 1:4], season_exog[:, :-3])) # season_exog[1] = -season_exog[1] # the following line is commented out because seasonal terms are # *not* centered in JMulTi's VAR-framework (in contrast to VECM) # season_exog -= 1 / seasons season_exog = season_exog.T exog_stack.append(season_exog) if exog_stack != []: exog = np.column_stack(exog_stack) else: exog = None return exog def load_results_statsmodels(dataset): results_per_deterministic_terms = dict.fromkeys(dt_s_list) for dt_s_tup in dt_s_list: endog = data[dataset] exog = generate_exog_from_season(dt_s_tup[1], len(endog)) model = VAR(endog, exog) results_per_deterministic_terms[dt_s_tup] = model.fit( maxlags=4, trend=dt_s_tup[0], method="ols") return results_per_deterministic_terms def build_err_msg(ds, dt_s, parameter_str): dt = dt_s_tup_to_string(dt_s) seasons = dt_s[1] err_msg = "Error in " + parameter_str + " for:\n" err_msg += "- Dataset: " + ds.__str__() + "\n" err_msg += "- Deterministic terms: " err_msg += (dt_s[0] if dt != "nc" else "no det. terms") if seasons > 0: err_msg += ", seasons: " + str(seasons) return err_msg def setup(): datasets.append(macro) # TODO: append more data sets for more test cases. for ds in datasets: load_data(ds, data) results_ref[ds] = load_results_jmulti(ds, dt_s_list) results_sm[ds] = load_results_statsmodels(ds) setup() def test_ols_coefs(): if debug_mode: if "coefs" not in to_test: return print("\n\nESTIMATED PARAMETER MATRICES FOR LAGGED ENDOG", end="") for ds in datasets: for dt_s in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt_s) + ": ", end="") # estimated parameter vector err_msg = build_err_msg(ds, dt_s, "PARAMETER MATRICES ENDOG") obtained = np.hstack(results_sm[ds][dt_s].coefs) desired = results_ref[ds][dt_s]["est"]["Lagged endogenous term"] assert_allclose(obtained, desired, rtol, atol, False, err_msg) if debug_mode and dont_test_se_t_p: continue # standard errors obt = results_sm[ds][dt_s].stderr_endog_lagged des = results_ref[ds][dt_s]["se"]["Lagged endogenous term"].T assert_allclose(obt, des, rtol, atol, False, "STANDARD ERRORS\n" + err_msg) # t-values obt = results_sm[ds][dt_s].tvalues_endog_lagged des = results_ref[ds][dt_s]["t"]["Lagged endogenous term"].T assert_allclose(obt, des, rtol, atol, False, "t-VALUES\n" + err_msg) # p-values obt = results_sm[ds][dt_s].pvalues_endog_lagged des = results_ref[ds][dt_s]["p"]["Lagged endogenous term"].T assert_allclose(obt, des, rtol, atol, False, "p-VALUES\n" + err_msg) def test_ols_det_terms(): if debug_mode: if "det" not in to_test: return print("\n\nESTIMATED PARAMETERS FOR DETERMINISTIC TERMS", end="") for ds in datasets: for dt_s in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt_s) + ": ", end="") err_msg = build_err_msg(ds, dt_s, "PARAMETER MATRICES EXOG") det_key_ref = "Deterministic term" # If there are no det. 
terms, just make sure we don't compute any: if det_key_ref not in results_ref[ds][dt_s]["est"].keys(): assert_((results_sm[ds][dt_s].coefs_exog.size == 0 and results_sm[ds][dt_s].stderr_dt.size == 0 and results_sm[ds][dt_s].tvalues_dt.size == 0 and results_sm[ds][dt_s].pvalues_dt.size == 0), err_msg) continue obtained = results_sm[ds][dt_s].coefs_exog desired = results_ref[ds][dt_s]["est"][det_key_ref] desired = reorder_jmultis_det_terms( desired, dt_s[0].startswith("c"), dt_s[1]) assert_allclose(obtained, desired, rtol, atol, False, err_msg) if debug_mode and dont_test_se_t_p: continue # standard errors obt = results_sm[ds][dt_s].stderr_dt des = results_ref[ds][dt_s]["se"][det_key_ref] des = reorder_jmultis_det_terms(des, dt_s[0].startswith("c"), dt_s[1]).T assert_allclose(obt, des, rtol, atol, False, "STANDARD ERRORS\n" + err_msg) # t-values obt = results_sm[ds][dt_s].tvalues_dt des = results_ref[ds][dt_s]["t"][det_key_ref] des = reorder_jmultis_det_terms(des, dt_s[0].startswith("c"), dt_s[1]).T assert_allclose(obt, des, rtol, atol, False, "t-VALUES\n" + err_msg) # p-values obt = results_sm[ds][dt_s].pvalues_dt des = results_ref[ds][dt_s]["p"][det_key_ref] des = reorder_jmultis_det_terms(des, dt_s[0].startswith("c"), dt_s[1]).T assert_allclose(obt, des, rtol, atol, False, "p-VALUES\n" + err_msg) def test_ols_sigma(): if debug_mode: if "Sigma_u" not in to_test: return print("\n\nSIGMA_U", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") err_msg = build_err_msg(ds, dt, "Sigma_u") obtained = results_sm[ds][dt].sigma_u desired = results_ref[ds][dt]["est"]["Sigma_u"] assert_allclose(obtained, desired, rtol, atol, False, err_msg) def test_log_like(): if debug_mode: if "log_like" not in to_test: return else: print("\n\nLOG LIKELIHOOD", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") err_msg = build_err_msg(ds, dt, "Log Likelihood") obtained = results_sm[ds][dt].llf desired = results_ref[ds][dt]["log_like"] assert_allclose(obtained, desired, rtol, atol, False, err_msg) def test_fc(): if debug_mode: if "fc" not in to_test: return else: print("\n\nFORECAST", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") steps = 5 # parsed JMulTi output comprises 5 steps last_observations = results_sm[ds][dt].endog[ -results_sm[ds][dt].k_ar:] seasons = dt[1] if seasons == 0: exog_future = None else: exog_future = np.zeros((steps, seasons-1)) # the following line is appropriate only if the last # observation was in the next to last season (this is the case # for macrodata) exog_future[1:seasons] = np.identity(seasons-1) err_msg = build_err_msg(ds, dt, "FORECAST") # test point forecast functionality of forecast method obtained = results_sm[ds][dt].forecast( y=last_observations, steps=steps, exog_future=exog_future) desired = results_ref[ds][dt]["fc"]["fc"] assert_allclose(obtained, desired, rtol, atol, False, err_msg) # test forecast method with confidence interval calculation err_msg = build_err_msg(ds, dt, "FORECAST WITH INTERVALS") obtained = results_sm[ds][dt].forecast_interval( y=last_observations, steps=steps, alpha=0.05, exog_future=exog_future) obt = obtained[0] # forecast obt_l = obtained[1] # lower bound obt_u = obtained[2] # upper bound des = results_ref[ds][dt]["fc"]["fc"] des_l = results_ref[ds][dt]["fc"]["lower"] des_u = results_ref[ds][dt]["fc"]["upper"] assert_allclose(obt, des, rtol, atol, False, 
err_msg) assert_allclose(obt_l, des_l, rtol, atol, False, err_msg) assert_allclose(obt_u, des_u, rtol, atol, False, err_msg) def test_causality(): # test Granger- and instantaneous causality if debug_mode: if "causality" not in to_test: return else: print("\n\nCAUSALITY", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") err_msg_g_p = build_err_msg(ds, dt, "GRANGER CAUS. - p-VALUE") err_msg_g_t = build_err_msg(ds, dt, "GRANGER CAUS. - TEST STAT.") err_msg_i_p = build_err_msg(ds, dt, "INSTANT. CAUS. - p-VALUE") err_msg_i_t = build_err_msg(ds, dt, "INSTANT. CAUS. - TEST STAT.") v_ind = range(len(ds.variable_names)) for causing_ind in sublists(v_ind, 1, len(v_ind)-1): causing_names = ["y" + str(i+1) for i in causing_ind] causing_key = tuple(ds.variable_names[i] for i in causing_ind) caused_ind = [i for i in v_ind if i not in causing_ind] caused_names = ["y" + str(i+1) for i in caused_ind] caused_key = tuple(ds.variable_names[i] for i in caused_ind) # test Granger-causality ###################################### granger_sm_ind = results_sm[ds][ dt].test_causality(caused_ind, causing_ind) granger_sm_str = results_sm[ds][ dt].test_causality(caused_names, causing_names) # test test-statistic for Granger non-causality: g_t_obt = granger_sm_ind.test_statistic g_t_des = results_ref[ds][dt]["granger_caus"][ "test_stat"][(causing_key, caused_key)] assert_allclose(g_t_obt, g_t_des, rtol, atol, False, err_msg_g_t) # check whether string sequences as args work in the same way: g_t_obt_str = granger_sm_str.test_statistic assert_allclose(g_t_obt_str, g_t_obt, 1e-07, 0, False, err_msg_g_t + " - sequences of integers and ".upper() + "strings as arguments don't yield the same result!".upper()) # check if int (e.g. 0) as index and list of int ([0]) yield # the same result: if len(causing_ind) == 1 or len(caused_ind) == 1: ci = causing_ind[0] if len(causing_ind)==1 else causing_ind ce = caused_ind[0] if len(caused_ind) == 1 else caused_ind granger_sm_single_ind = results_sm[ds][ dt].test_causality(ce, ci) g_t_obt_single = granger_sm_single_ind.test_statistic assert_allclose(g_t_obt_single, g_t_obt, 1e-07, 0, False, err_msg_g_t + " - list of int and int as ".upper() + "argument don't yield the same result!".upper()) # test p-value for Granger non-causality: g_p_obt = granger_sm_ind.pvalue g_p_des = results_ref[ds][dt]["granger_caus"]["p"][( causing_key, caused_key)] assert_allclose(g_p_obt, g_p_des, rtol, atol, False, err_msg_g_p) # check whether string sequences as args work in the same way: g_p_obt_str = granger_sm_str.pvalue assert_allclose(g_p_obt_str, g_p_obt, 1e-07, 0, False, err_msg_g_t + " - sequences of integers and ".upper() + "strings as arguments don't yield the same result!".upper()) # check if int (e.g. 
0) as index and list of int ([0]) yield # the same result: if len(causing_ind) == 1: g_p_obt_single = granger_sm_single_ind.pvalue assert_allclose(g_p_obt_single, g_p_obt, 1e-07, 0, False, err_msg_g_t + " - list of int and int as ".upper() + \ "argument don't yield the same result!".upper()) # test instantaneous causality ################################ inst_sm_ind = results_sm[ds][dt].test_inst_causality( causing_ind) inst_sm_str = results_sm[ds][dt].test_inst_causality( causing_names) # test test-statistic for instantaneous non-causality t_obt = inst_sm_ind.test_statistic t_des = results_ref[ds][dt]["inst_caus"][ "test_stat"][(causing_key, caused_key)] assert_allclose(t_obt, t_des, rtol, atol, False, err_msg_i_t) # check whether string sequences as args work in the same way: t_obt_str = inst_sm_str.test_statistic assert_allclose(t_obt_str, t_obt, 1e-07, 0, False, err_msg_i_t + " - sequences of integers and ".upper() + "strings as arguments don't yield the same result!".upper()) # check if int (e.g. 0) as index and list of int ([0]) yield # the same result: if len(causing_ind) == 1: inst_sm_single_ind = results_sm[ds][ dt].test_inst_causality(causing_ind[0]) t_obt_single = inst_sm_single_ind.test_statistic assert_allclose(t_obt_single, t_obt, 1e-07, 0, False, err_msg_i_t + " - list of int and int as ".upper() + "argument don't yield the same result!".upper()) # test p-value for instantaneous non-causality p_obt = results_sm[ds][dt].test_inst_causality( causing_ind).pvalue p_des = results_ref[ds][dt]["inst_caus"]["p"][( causing_key, caused_key)] assert_allclose(p_obt, p_des, rtol, atol, False, err_msg_i_p) # check whether string sequences as args work in the same way: p_obt_str = inst_sm_str.pvalue assert_allclose(p_obt_str, p_obt, 1e-07, 0, False, err_msg_i_p + " - sequences of integers and ".upper() + "strings as arguments don't yield the same result!".upper()) # check if int (e.g. 
0) as index and list of int ([0]) yield # the same result: if len(causing_ind) == 1: inst_sm_single_ind = results_sm[ds][ dt].test_inst_causality(causing_ind[0]) p_obt_single = inst_sm_single_ind.pvalue assert_allclose(p_obt_single, p_obt, 1e-07, 0, False, err_msg_i_p + " - list of int and int as ".upper() + "argument don't yield the same result!".upper()) def test_impulse_response(): if debug_mode: if "impulse-response" not in to_test: return else: print("\n\nIMPULSE-RESPONSE", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") err_msg = build_err_msg(ds, dt, "IMULSE-RESPONSE") periods = 20 obtained_all = results_sm[ds][dt].irf(periods=periods).irfs # flatten inner arrays to make them comparable to parsed results: obtained_all = obtained_all.reshape(periods+1, -1) desired_all = results_ref[ds][dt]["ir"] assert_allclose(obtained_all, desired_all, rtol, atol, False, err_msg) def test_lag_order_selection(): if debug_mode: if "lag order" not in to_test: return else: print("\n\nLAG ORDER SELECTION", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") endog_tot = data[ds] exog = generate_exog_from_season(dt[1], len(endog_tot)) model = VAR(endog_tot, exog) obtained_all = model.select_order(10, trend=dt[0]) for ic in ["aic", "fpe", "hqic", "bic"]: err_msg = build_err_msg(ds, dt, "LAG ORDER SELECTION - " + ic.upper()) obtained = getattr(obtained_all, ic) desired = results_ref[ds][dt]["lagorder"][ic] assert_allclose(obtained, desired, rtol, atol, False, err_msg) def test_normality(): if debug_mode: if "test normality" not in to_test: return else: print("\n\nTEST NON-NORMALITY", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") obtained = results_sm[ds][dt].test_normality(signif=0.05) err_msg = build_err_msg(ds, dt, "TEST NON-NORMALITY - STATISTIC") obt_statistic = obtained.test_statistic des_statistic = results_ref[ds][dt]["test_norm"][ "joint_test_statistic"] assert_allclose(obt_statistic, des_statistic, rtol, atol, False, err_msg) err_msg = build_err_msg(ds, dt, "TEST NON-NORMALITY - P-VALUE") obt_pvalue = obtained.pvalue des_pvalue = results_ref[ds][dt]["test_norm"]["joint_pvalue"] assert_allclose(obt_pvalue, des_pvalue, rtol, atol, False, err_msg) # call methods to assure they don't raise exceptions obtained.summary() str(obtained) # __str__() def test_whiteness(): if debug_mode: if "whiteness" not in to_test: return else: print("\n\nTEST WHITENESS OF RESIDUALS", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") lags = results_ref[ds][dt]["whiteness"]["tested order"] obtained = results_sm[ds][dt].test_whiteness(nlags=lags) # test statistic err_msg = build_err_msg(ds, dt, "WHITENESS OF RESIDUALS - " "TEST STATISTIC") desired = results_ref[ds][dt]["whiteness"]["test statistic"] assert_allclose(obtained.test_statistic, desired, rtol, atol, False, err_msg) # p-value err_msg = build_err_msg(ds, dt, "WHITENESS OF RESIDUALS - " "P-VALUE") desired = results_ref[ds][dt]["whiteness"]["p-value"] assert_allclose(obtained.pvalue, desired, rtol, atol, False, err_msg) obtained = results_sm[ds][dt].test_whiteness(nlags=lags, adjusted=True) # test statistic (adjusted Portmanteau test) err_msg = build_err_msg(ds, dt, "WHITENESS OF RESIDUALS - " "TEST STATISTIC (ADJUSTED TEST)") desired = results_ref[ds][dt]["whiteness"]["test statistic adj."] 
assert_allclose(obtained.test_statistic, desired, rtol, atol, False, err_msg) # p-value (adjusted Portmanteau test) err_msg = build_err_msg(ds, dt, "WHITENESS OF RESIDUALS - " "P-VALUE (ADJUSTED TEST)") desired = results_ref[ds][dt]["whiteness"]["p-value adjusted"] assert_allclose(obtained.pvalue, desired, rtol, atol, False, err_msg) def test_exceptions(): if debug_mode: if "exceptions" not in to_test: return else: print("\n\nEXCEPTIONS\n", end="") for ds in datasets: for dt in dt_s_list: if debug_mode: print("\n" + dt_s_tup_to_string(dt) + ": ", end="") # instant causality: ### 0<signif<1 assert_raises(ValueError, results_sm[ds][dt].test_inst_causality, 0, 0) # this means signif=0 ### causing must be int, str or iterable of int or str assert_raises(TypeError, results_sm[ds][dt].test_inst_causality, [0.5]) # 0.5 not an int
bsd-3-clause
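generate_exog_from_season() in the test module above encodes seasons as 0/1 dummy columns for the VAR exog. The standalone snippet below reproduces that construction for quarterly data so the resulting matrix can be inspected directly; seasons=4 and endog_len=8 are arbitrary illustrative values.

# Seasonal dummy matrix, mirroring generate_exog_from_season() above.
import numpy as np

seasons, endog_len = 4, 8
season_exog = np.zeros((seasons - 1, endog_len))
for i in range(seasons - 1):
    season_exog[i, i::seasons] = 1   # dummy for season i fires every 4th obs
exog = season_exog.T                 # shape (endog_len, seasons - 1)
print(exog)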
tapomayukh/projects_in_python
classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test11_cross_validate_objects_800ms.py
1
4624
# Principal Component Analysis Code : from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud from pylab import * import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle from mvpa.clfs.knn import kNN from mvpa.datasets import Dataset from mvpa.clfs.transerror import TransferError from mvpa.misc.data_generators import normalFeatureDataset from mvpa.algorithms.cvtranserror import CrossValidatedTransferError from mvpa.datasets.splitters import NFoldSplitter import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window') from data_800ms import Fmat_original def pca(X): #get dimensions num_data,dim = X.shape #center data mean_X = X.mean(axis=1) M = (X-mean_X) # subtract the mean (along columns) Mcov = cov(M) print 'PCA - COV-Method used' val,vec = linalg.eig(Mcov) #return the projection matrix, the variance and the mean return vec,val,mean_X, M, Mcov def my_mvpa(Y,num2): #Using PYMVPA PCA_data = np.array(Y) PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5 clf = kNN(k=num2) terr = TransferError(clf) ds1 = Dataset(samples=PCA_data,labels=PCA_label_2) cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion']) error = cvterr(ds1) return (1-error)*100 def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC): # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure) W = eigvec_total[:,0:num_PC] m_W, n_W = np.shape(W) # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful) length = len(eigval_total) s = np.matrix(np.zeros(length)).T i = 0 while i < length: s[i] = sqrt(C[i,i]) i = i+1 Z = np.divide(B,s) m_Z, n_Z = np.shape(Z) #Projected Data: Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B' m_Y, n_Y = np.shape(Y.T) return Y.T if __name__ == '__main__': Fmat = Fmat_original # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) print 'Total_Matrix_Shape:',m_tot,n_tot eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat) #print eigvec_total #print eigval_total #print mean_data_total m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total)) m_eigvec_total, n_eigvec_total = np.shape(eigvec_total) m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total)) print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. 
On the x axis there is the number of eigenvalues used. perc_total = cumsum(eigval_total)/sum(eigval_total) num_PC=1 while num_PC <=20: Proj = np.zeros((140,num_PC)) Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC) # PYMVPA: num=0 cv_acc = np.zeros(21) while num <=20: cv_acc[num] = my_mvpa(Proj,num) num = num+1 plot(np.arange(21),cv_acc,'-s') grid('True') hold('True') num_PC = num_PC+1 legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs')) ylabel('Cross-Validation Accuracy') xlabel('k in k-NN Classifier') show()
mit
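The script above sweeps the number of principal components and k for a k-NN classifier using the historical PyMVPA API. An equivalent sketch with scikit-learn on synthetic data follows; the dataset shape, component counts, and k values are arbitrary stand-ins for the real feature matrix.

# PCA -> k-NN cross-validation sketch with scikit-learn.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

X, y = make_classification(n_samples=140, n_features=40, n_informative=10,
                           n_classes=4, random_state=0)

for num_pc in (2, 5, 10):
    for k in (1, 3, 5):
        pipe = make_pipeline(PCA(n_components=num_pc),
                             KNeighborsClassifier(n_neighbors=k))
        acc = cross_val_score(pipe, X, y, cv=5).mean()
        print('PCs=%d, k=%d: %.1f%% accuracy' % (num_pc, k, 100 * acc))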
heli522/scikit-learn
sklearn/cross_validation.py
4
62314
""" The :mod:`sklearn.cross_validation` module includes utilities for cross- validation and performance evaluation. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings from itertools import chain, combinations from math import ceil, floor, factorial import numbers import time from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import is_classifier, clone from .utils import indexable, check_random_state, safe_indexing from .utils.validation import (_is_arraylike, _num_samples, check_array, column_or_1d) from .utils.multiclass import type_of_target from .externals.joblib import Parallel, delayed, logger from .externals.six import with_metaclass from .externals.six.moves import zip from .metrics.scorer import check_scoring from .utils.fixes import bincount __all__ = ['KFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'LabelShuffleSplit', 'check_cv', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'train_test_split'] class _PartitionIterator(with_metaclass(ABCMeta)): """Base class for CV iterators where train_mask = ~test_mask Implementations must define `_iter_test_masks` or `_iter_test_indices`. Parameters ---------- n : int Total number of elements in dataset. """ def __init__(self, n): if abs(n - int(n)) >= np.finfo('f').eps: raise ValueError("n must be an integer") self.n = int(n) def __iter__(self): ind = np.arange(self.n) for test_index in self._iter_test_masks(): train_index = np.logical_not(test_index) train_index = ind[train_index] test_index = ind[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices() """ for test_index in self._iter_test_indices(): test_mask = self._empty_mask() test_mask[test_index] = True yield test_mask def _iter_test_indices(self): """Generates integer indices corresponding to test sets.""" raise NotImplementedError def _empty_mask(self): return np.zeros(self.n, dtype=np.bool) class LeaveOneOut(_PartitionIterator): """Leave-One-Out cross validation iterator. Provides train/test indices to split data in train test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``. Due to the high number of test sets (which is the same as the number of samples) this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in dataset. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = cross_validation.LeaveOneOut(2) >>> len(loo) 2 >>> print(loo) sklearn.cross_validation.LeaveOneOut(n=2) >>> for train_index, test_index in loo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def _iter_test_indices(self): return range(self.n) def __repr__(self): return '%s.%s(n=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, ) def __len__(self): return self.n class LeavePOut(_PartitionIterator): """Leave-P-Out cross validation iterator Provides train/test indices to split data in train test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in dataset. p : int Size of the test sets. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = cross_validation.LeavePOut(4, 2) >>> len(lpo) 6 >>> print(lpo) sklearn.cross_validation.LeavePOut(n=4, p=2) >>> for train_index, test_index in lpo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, n, p): super(LeavePOut, self).__init__(n) self.p = p def _iter_test_indices(self): for comb in combinations(range(self.n), self.p): yield np.array(comb) def __repr__(self): return '%s.%s(n=%i, p=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.p, ) def __len__(self): return int(factorial(self.n) / factorial(self.n - self.p) / factorial(self.p)) class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)): """Base class to validate KFold approaches""" @abstractmethod def __init__(self, n, n_folds, shuffle, random_state): super(_BaseKFold, self).__init__(n) if abs(n_folds - int(n_folds)) >= np.finfo('f').eps: raise ValueError("n_folds must be an integer") self.n_folds = n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross validation requires at least one" " train / test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if n_folds > self.n: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(n_folds, n)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.shuffle = shuffle self.random_state = random_state class KFold(_BaseKFold): """K-Folds cross validation iterator. Provides train/test indices to split data in train test sets. Split dataset into k consecutive folds (without shuffling). Each fold is then used a validation set once while the k - 1 remaining fold form the training set. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements. n_folds : int, default=3 Number of folds. Must be at least 2. 
shuffle : boolean, optional Whether to shuffle the data before splitting into batches. random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = cross_validation.KFold(4, n_folds=2) >>> len(kf) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in kf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first n % n_folds folds have size n // n_folds + 1, other folds have size n // n_folds. See also -------- StratifiedKFold: take label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). """ def __init__(self, n, n_folds=3, shuffle=False, random_state=None): super(KFold, self).__init__(n, n_folds, shuffle, random_state) self.idxs = np.arange(n) if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): n = self.n n_folds = self.n_folds fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield self.idxs[start:stop] current = stop def __repr__(self): return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array-like, [n_samples] Samples to split in K folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = cross_validation.StratifiedKFold(y, n_folds=2) >>> len(skf) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in skf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size trunc(n_samples / n_folds), the last one has the complementary. 
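    A further illustrative sketch (the shuffle flag and seed below are arbitrary
    additions, reusing the ``y`` from the example above):

    >>> skf = cross_validation.StratifiedKFold(y, n_folds=2, shuffle=True,
    ...                                        random_state=0)
    >>> len(skf)
    2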
""" def __init__(self, y, n_folds=3, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__( len(y), n_folds, shuffle, random_state) y = np.asarray(y) n_samples = y.shape[0] unique_labels, y_inversed = np.unique(y, return_inverse=True) label_counts = bincount(y_inversed) min_labels = np.min(label_counts) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # don't want to use the same seed in each label's shuffle if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each label so as to respect the # balance of labels per_label_cvs = [ KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle, random_state=rng) for c in label_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)): for label, (_, test_split) in zip(unique_labels, per_label_splits): label_test_folds = test_folds[y == label] # the test split can be too big because we used # KFold(max(c, self.n_folds), self.n_folds) instead of # KFold(c, self.n_folds) to make it possible to not crash even # if the data is not 100% stratifiable for all the labels # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(label_test_folds)] label_test_folds[test_split] = test_fold_idx test_folds[y == label] = label_test_folds self.test_folds = test_folds self.y = y def _iter_test_masks(self): for i in range(self.n_folds): yield self.test_folds == i def __repr__(self): return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.y, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LeaveOneLabelOut(_PartitionIterator): """Leave-One-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = cross_validation.LeaveOneLabelOut(labels) >>> len(lol) 2 >>> print(lol) sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2]) >>> for train_index, test_index in lol: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] """ def __init__(self, labels): super(LeaveOneLabelOut, self).__init__(len(labels)) # We make a copy of labels to avoid side-effects during iteration self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) def _iter_test_masks(self): for i in self.unique_labels: yield self.labels == i def __repr__(self): return '%s.%s(labels=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, ) def __len__(self): return self.n_unique_labels class LeavePLabelOut(_PartitionIterator): """Leave-P-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. p : int Number of samples to leave out in the test split. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = cross_validation.LeavePLabelOut(labels, p=2) >>> len(lpl) 3 >>> print(lpl) sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2) >>> for train_index, test_index in lpl: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] """ def __init__(self, labels, p): # We make a copy of labels to avoid side-effects during iteration super(LeavePLabelOut, self).__init__(len(labels)) self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) self.p = p def _iter_test_masks(self): comb = combinations(range(self.n_unique_labels), self.p) for idx in comb: test_index = self._empty_mask() idx = np.array(idx) for l in self.unique_labels[idx]: test_index[self.labels == l] = True yield test_index def __repr__(self): return '%s.%s(labels=%s, p=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, self.p, ) def __len__(self): return int(factorial(self.n_unique_labels) / factorial(self.n_unique_labels - self.p) / factorial(self.p)) class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n, n_iter=10, test_size=0.1, train_size=None, random_state=None): self.n = n self.n_iter = n_iter self.test_size = test_size self.train_size = train_size self.random_state = random_state self.n_train, self.n_test = _validate_shuffle_split(n, test_size, train_size) def __iter__(self): for train, test in self._iter_indices(): yield train, test return @abstractmethod def _iter_indices(self): """Generate (train, test) indices""" class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validation iterator. Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn import cross_validation >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... test_size=.25, random_state=0) >>> len(rs) 3 >>> print(rs) ... # doctest: +ELLIPSIS ShuffleSplit(4, n_iter=3, test_size=0.25, ...) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... train_size=0.5, test_size=.25, random_state=0) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... 
TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] """ def _iter_indices(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_test = permutation[:self.n_test] ind_train = permutation[self.n_test:self.n_test + self.n_train] yield ind_train, ind_test def __repr__(self): return ('%s(%d, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _validate_shuffle_split(n, test_size, train_size): if test_size is None and train_size is None: raise ValueError( 'test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind == 'i': if test_size >= n: raise ValueError( 'test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n)) else: raise ValueError("Invalid value for test_size: %r" % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError("train_size=%f should be smaller " "than 1.0 or be an integer" % train_size) elif np.asarray(test_size).dtype.kind == 'f' and \ train_size + test_size > 1.: raise ValueError('The sum of test_size and train_size = %f, ' 'should be smaller than 1.0. Reduce ' 'test_size and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind == 'i': if train_size >= n: raise ValueError("train_size=%d should be smaller " "than the number of samples %d" % (train_size, n)) else: raise ValueError("Invalid value for train_size: %r" % train_size) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n - n_test else: if np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n) else: n_train = float(train_size) if test_size is None: n_test = n - n_train if n_train + n_test > n: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n)) return int(n_train), int(n_test) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array, [n_samples] Labels of samples. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. 
train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.cross_validation import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0) >>> len(sss) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...) >>> for train_index, test_index in sss: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, y, n_iter=10, test_size=0.1, train_size=None, random_state=None): super(StratifiedShuffleSplit, self).__init__( len(y), n_iter, test_size, train_size, random_state) self.y = np.array(y) self.classes, self.y_indices = np.unique(y, return_inverse=True) n_cls = self.classes.shape[0] if np.min(bincount(self.y_indices)) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if self.n_train < n_cls: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_train, n_cls)) if self.n_test < n_cls: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_test, n_cls)) def _iter_indices(self): rng = check_random_state(self.random_state) cls_count = bincount(self.y_indices) p_i = cls_count / float(self.n) n_i = np.round(self.n_train * p_i).astype(int) t_i = np.minimum(cls_count - n_i, np.round(self.n_test * p_i).astype(int)) for n in range(self.n_iter): train = [] test = [] for i, cls in enumerate(self.classes): permutation = rng.permutation(cls_count[i]) cls_i = np.where((self.y == cls))[0][permutation] train.extend(cls_i[:n_i[i]]) test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]]) # Because of rounding issues (as n_train and n_test are not # dividers of the number of elements per class), we may end # up here with less samples in train and test than asked for. if len(train) < self.n_train or len(test) < self.n_test: # We complete by affecting randomly the missing indexes missing_idx = np.where(bincount(train + test, minlength=len(self.y)) == 0, )[0] missing_idx = rng.permutation(missing_idx) train.extend(missing_idx[:(self.n_train - len(train))]) test.extend(missing_idx[-(self.n_test - len(test)):]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.y, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter class PredefinedSplit(_PartitionIterator): """Predefined split cross validation iterator Splits the data into training/test set folds according to a predefined scheme. Each sample can be assigned to at most one test set fold, as specified by the user through the ``test_fold`` parameter. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- test_fold : "array-like, shape (n_samples,) test_fold[i] gives the test set fold of sample i. A value of -1 indicates that the corresponding sample is not part of any test set folds, but will instead always be put into the training fold. Examples -------- >>> from sklearn.cross_validation import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1]) >>> len(ps) 2 >>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1]) >>> for train_index, test_index in ps: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): super(PredefinedSplit, self).__init__(len(test_fold)) self.test_fold = np.array(test_fold, dtype=np.int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def _iter_test_indices(self): for f in self.unique_folds: yield np.where(self.test_fold == f)[0] def __repr__(self): return '%s.%s(test_fold=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.test_fold) def __len__(self): return len(self.unique_folds) class LabelShuffleSplit(ShuffleSplit): '''Shuffle-Labels-Out cross-validation iterator Provides randomized train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LabelShuffleSplit is that the former generates splits using all subsets of size ``p`` unique labels, whereas LabelShuffleSplit generates a user-determined number of random test splits, each with a user-determined fraction of unique labels. For example, a less computationally intensive alternative to ``LeavePLabelOut(labels, p=10)`` would be ``LabelShuffleSplit(labels, test_size=10, n_iter=100)``. Note: The parameters ``test_size`` and ``train_size`` refer to labels, and not to samples, as in ShuffleSplit. Parameters ---------- labels : array, [n_samples] Labels of samples n_iter : int (default 5) Number of re-shuffling & splitting iterations. test_size : float (default 0.2), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the test split. If int, represents the absolute number of test labels. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the train split. If int, represents the absolute number of train labels. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. 
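    Examples
    --------
    A minimal usage sketch (the labels, sizes and seed below are illustrative
    rather than prescribed):

    >>> from sklearn import cross_validation
    >>> labels = [1, 1, 2, 2, 3, 3]
    >>> lss = cross_validation.LabelShuffleSplit(labels, n_iter=4,
    ...                                          test_size=0.5, random_state=0)
    >>> len(lss)
    4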
''' def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None, random_state=None): classes, label_indices = np.unique(labels, return_inverse=True) super(LabelShuffleSplit, self).__init__( len(classes), n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) self.labels = labels self.classes = classes self.label_indices = label_indices def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.labels, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _iter_indices(self): for label_train, label_test in super(LabelShuffleSplit, self)._iter_indices(): # these are the indices of classes in the partition # invert them into data indices train = np.flatnonzero(np.in1d(self.label_indices, label_train)) test = np.flatnonzero(np.in1d(self.label_indices, label_test)) yield train, test ############################################################################## def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Generate cross-validated estimates for each input data point Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. cv : integer or cross-validation generator, optional, default=3 A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if estimator is a classifier and the target y is binary or multiclass, or the number of folds in KFold otherwise. Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. This generator must include all elements in the test set exactly once. Otherwise, a ValueError is raised. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- preds : ndarray This is the result of calling 'predict' """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
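    # Each parallel worker returns (predictions, test_indices); the prediction
    # blocks are concatenated and then re-ordered with an inverse index so that
    # the returned array is aligned with the original sample order of X.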
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y, train, test, verbose, fit_params) for train, test in cv) preds = [p for p, _ in preds_blocks] locs = np.concatenate([loc for _, loc in preds_blocks]) if not _check_is_partition(locs, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') inv_locs = np.empty(len(locs), dtype=int) inv_locs[locs] = np.arange(len(locs)) # Check for sparse predictions if sp.issparse(preds[0]): preds = sp.vstack(preds, format=preds[0].format) else: preds = np.concatenate(preds) return preds[inv_locs] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. Returns ------- preds : sequence Result of calling 'estimator.predict' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) preds = estimator.predict(X_test) return preds, test def _check_is_partition(locs, n): """Check whether locs is a reordering of the array np.arange(n) Parameters ---------- locs : ndarray integer array to test n : int number of expected elements Returns ------- is_partition : bool True iff sorted(locs) is range(n) """ if len(locs) != n: return False hit = np.zeros(n, bool) hit[locs] = True if not np.all(hit): return False return True def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : integer or cross-validation generator, optional, default=3 A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if estimator is a classifier and the target y is binary or multiclass, or the number of folds in KFold otherwise. Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. n_jobs : integer, optional The number of CPUs to use to do the computation. 
-1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv) return np.array(scores)[:, 0] class FitFailedWarning(RuntimeWarning): pass def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
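    For example (illustrative only), with ``return_train_score=True`` and
    ``return_parameters=True`` the returned list has the layout
    ``[train_score, test_score, n_test_samples, scoring_time, parameters]``.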
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)" ) else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." 
% (str(score), type(score))) return score def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return y[ind] def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, a cv generator instance, or None The input specifying which cv generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. """ is_sparse = sp.issparse(X) if cv is None: cv = 3 if isinstance(cv, numbers.Integral): if classifier: if type_of_target(y) in ['binary', 'multiclass']: cv = StratifiedKFold(y, cv) else: cv = KFold(_num_samples(y), cv) else: if not is_sparse: n_samples = len(X) else: n_samples = X.shape[0] cv = KFold(n_samples, cv) return cv def permutation_test_score(estimator, X, y, cv=None, n_permutations=100, n_jobs=1, labels=None, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : integer or cross-validation generator, optional, default=3 A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if estimator is a classifier and the target y is binary or multiclass, or the number of folds in KFold otherwise. Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. labels : array-like of shape [n_samples] (optional) Labels constrain the permutation among groups of samples with a same label. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. 
pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- *arrays : sequence of arrays or scipy.sparse matrices with same shape[0] Python lists or tuples occurring in arrays are converted to 1D numpy arrays. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. stratify : array-like or None (default is None) If not None, data is split in a stratified fashion, using this as the labels array. Returns ------- splitting : list of arrays, length=2 * len(arrays) List containing train-test split of input array. Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> X, y = np.arange(10).reshape((5, 2)), range(5) >>> X array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(y) [0, 1, 2, 3, 4] >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, random_state=42) ... 
>>> X_train array([[4, 5], [0, 1], [6, 7]]) >>> y_train [2, 0, 3] >>> X_test array([[2, 3], [8, 9]]) >>> y_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) dtype = options.pop('dtype', None) if dtype is not None: warnings.warn("dtype option is ignored and will be removed in 0.18.", DeprecationWarning) allow_nd = options.pop('allow_nd', None) allow_lists = options.pop('allow_lists', None) stratify = options.pop('stratify', None) if allow_lists is not None: warnings.warn("The allow_lists option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if allow_nd is not None: warnings.warn("The allow_nd option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if allow_lists is False or allow_nd is False: arrays = [check_array(x, 'csr', allow_nd=allow_nd, force_all_finite=False, ensure_2d=False) if x is not None else x for x in arrays] if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) if stratify is not None: cv = StratifiedShuffleSplit(stratify, test_size=test_size, train_size=train_size, random_state=random_state) else: n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests
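Taken together, the iterators and helpers defined above are typically combined along the lines of the short sketch below. It is only an illustration written against the API in this module; the dataset, estimator, fold counts and seeds are arbitrary choices rather than anything prescribed by the original file.

from sklearn import datasets
from sklearn.cross_validation import (KFold, ShuffleSplit, StratifiedKFold,
                                      cross_val_score, train_test_split)
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
X, y = iris.data, iris.target

# Hold out an evaluation set, stratifying on the class labels.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0, stratify=y)

# Score an estimator with seeded, shuffled 5-fold stratified cross-validation.
clf = LogisticRegression()
skf = StratifiedKFold(y_train, n_folds=5, shuffle=True, random_state=0)
print(cross_val_score(clf, X_train, y_train, cv=skf).mean())

# Unstratified alternatives built on sample indices rather than labels.
kf = KFold(len(y_train), n_folds=5, shuffle=True, random_state=0)
ss = ShuffleSplit(len(y_train), n_iter=10, test_size=0.2, random_state=0)
print(cross_val_score(clf, X_train, y_train, cv=kf).mean())
print(cross_val_score(clf, X_train, y_train, cv=ss).mean())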
bsd-3-clause
DonBeo/scikit-learn
sklearn/preprocessing/tests/test_data.py
8
31730
import warnings import numpy as np import numpy.linalg as la from scipy import sparse from distutils.version import LooseVersion from sklearn.utils.testing import assert_almost_equal, clean_warning_registry from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_no_warnings from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.preprocessing.data import _transform_selected from sklearn.preprocessing.data import Binarizer from sklearn.preprocessing.data import KernelCenterer from sklearn.preprocessing.data import Normalizer from sklearn.preprocessing.data import normalize from sklearn.preprocessing.data import OneHotEncoder from sklearn.preprocessing.data import StandardScaler from sklearn.preprocessing.data import scale from sklearn.preprocessing.data import MinMaxScaler from sklearn.preprocessing.data import add_dummy_feature from sklearn.preprocessing.data import PolynomialFeatures from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_polynomial_features(): # Test Polynomial Features X1 = np.arange(6)[:, np.newaxis] P1 = np.hstack([np.ones_like(X1), X1, X1 ** 2, X1 ** 3]) deg1 = 3 X2 = np.arange(6).reshape((3, 2)) x1 = X2[:, :1] x2 = X2[:, 1:] P2 = np.hstack([x1 ** 0 * x2 ** 0, x1 ** 1 * x2 ** 0, x1 ** 0 * x2 ** 1, x1 ** 2 * x2 ** 0, x1 ** 1 * x2 ** 1, x1 ** 0 * x2 ** 2]) deg2 = 2 for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]: P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X) assert_array_almost_equal(P_test, P) P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X) assert_array_almost_equal(P_test, P[:, 1:]) interact = PolynomialFeatures(2, interaction_only=True, include_bias=True) X_poly = interact.fit_transform(X) assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]]) def test_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X = np.ones(5) assert_array_equal(scale(X, with_mean=False), X) def test_standard_scaler_numerical_stability(): """Test numerical stability of scaling""" # np.log(1e-5) is taken because of its floating point representation # was empirically found to cause numerical problems with np.mean & np.std. 
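    # The checks below assert that constant inputs scale to an all-zero array,
    # that the "standard deviation ... very close to 0" UserWarning is raised
    # only when the sample count and numpy version make the problem observable,
    # and that very large values trigger a "too large values" warning.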
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64) if LooseVersion(np.__version__) >= LooseVersion('1.9'): # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy x_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(scale(x), np.zeros(8)) else: w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(8)) # with 2 more samples, the std computation run into numerical issues: x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64) w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.ones(10, dtype=np.float64) * 1e-100 x_small_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.ones(10, dtype=np.float64) * 1e100 w = "Dataset may contain too large values" x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) x_big_centered = assert_warns_message(UserWarning, w, scale, x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0]) # Check that the data hasn't been modified assert_true(X_scaled is not X) X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is X) X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = 
scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-.5, .6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -.5) assert_array_almost_equal(X_trans.max(axis=0), .6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) assert_raises(ValueError, scaler.fit, X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0., 0., 0.5], [0., 0., 0.0], [0., 0., 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0., 1., 0.500], [-1., 0., 0.083], [+0., 0., 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1., 1., 1.5], [1., 1., 1.0], [1., 1., 2.0]] assert_array_almost_equal(X_trans, X_expected_1_2) def test_min_max_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # Constant feature. X = np.zeros(5) scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_greater_equal(X_scaled.min(), 0.) assert_less_equal(X_scaled.max(), 1.) 
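# A minimal additional sketch (not part of the original suite): with the
# default feature_range=(0, 1), MinMaxScaler should reproduce the textbook
# formula (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) on data
# without constant columns.
def test_min_max_scaler_formula_sketch():
    rng = np.random.RandomState(1)
    X = rng.randn(6, 3)
    expected = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert_array_almost_equal(MinMaxScaler().fit_transform(X), expected)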
def test_scaler_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) assert_raises(ValueError, StandardScaler().fit, X_csr) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_int(): # test that scaler converts integer input to floating # for both sparse and dense matrices rng = np.random.RandomState(42) X = rng.randint(20, size=(4, 5)) X[:, 0] = 0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) clean_warning_registry() with warnings.catch_warnings(record=True): X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) clean_warning_registry() with warnings.catch_warnings(record=True): scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, 
scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., 1.109, 1.856, 21., 1.559], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis( X_csr_scaled.astype(np.float), 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_without_copy(): # Check that StandardScaler.fit does not change input rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_copy = X.copy() StandardScaler(copy=False).fit(X) assert_array_equal(X, X_copy) X_csr_copy = X_csr.copy() StandardScaler(with_mean=False, copy=False).fit(X_csr) assert_array_equal(X_csr.toarray(), X_csr_copy.toarray()) def test_scale_sparse_with_mean_raise_exception(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X_csr = sparse.csr_matrix(X) # check scaling and fit with direct calls on sparse data assert_raises(ValueError, scale, X_csr, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr) # check transform and inverse_transform after a fit on a dense array scaler = StandardScaler(with_mean=True).fit(X) assert_raises(ValueError, scaler.transform, X_csr) X_transformed_csr = sparse.csr_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr) def test_scale_input_finiteness_validation(): # Check if non finite inputs raise ValueError X = [np.nan, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) X = [np.inf, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) def test_scale_function_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_scaled = scale(X, with_mean=False) assert_false(np.any(np.isnan(X_scaled))) X_csr_scaled = scale(X_csr, with_mean=False) assert_false(np.any(np.isnan(X_csr_scaled.data))) # test csc has same outcome X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) # raises value error on axis != 0 assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1) assert_array_almost_equal(X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) 
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) def test_warning_scaling_integers(): # Check warning when scaling integer data X = np.array([[1, 2, 0], [0, 0, 0]], dtype=np.uint8) w = "assumes floating point values as input, got uint8" clean_warning_registry() assert_warns_message(UserWarning, w, scale, X) assert_warns_message(UserWarning, w, StandardScaler().fit, X) assert_warns_message(UserWarning, w, MinMaxScaler().fit, X) def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert_true(X_norm is not X) X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert_true(X_norm is X) X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_l2(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalize(): # Test normalize function # Only tests functionality not used by the tests for Normalizer. 
X = np.random.RandomState(37).randn(3, 2) assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) assert_raises(ValueError, normalize, [[0]], axis=2) assert_raises(ValueError, normalize, [[0]], norm='l3') def test_binarizer(): X_ = np.array([[1, 0, 5], [2, 3, -1]]) for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix): X = init(X_.copy()) binarizer = Binarizer(threshold=2.0, copy=True) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 4) assert_equal(np.sum(X_bin == 1), 2) X_bin = binarizer.transform(X) assert_equal(sparse.issparse(X), sparse.issparse(X_bin)) binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) assert_true(X_bin is not X) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) assert_true(X_bin is not X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: assert_true(X_bin is X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(threshold=-0.5, copy=True) for init in (np.array, list): X = init(X_.copy()) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 1) assert_equal(np.sum(X_bin == 1), 5) X_bin = binarizer.transform(X) # Cannot use threshold < 0 for sparse assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X)) def test_center_kernel(): # Test that KernelCenterer is equivalent to StandardScaler # in feature space rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) scaler = StandardScaler(with_std=False) scaler.fit(X_fit) X_fit_centered = scaler.transform(X_fit) K_fit = np.dot(X_fit, X_fit.T) # center fit time matrix centerer = KernelCenterer() K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) K_fit_centered2 = centerer.fit_transform(K_fit) assert_array_almost_equal(K_fit_centered, K_fit_centered2) # center predict time matrix X_pred = rng.random_sample((2, 4)) K_pred = np.dot(X_pred, X_fit.T) X_pred_centered = scaler.transform(X_pred) K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) K_pred_centered2 = centerer.transform(K_pred) assert_array_almost_equal(K_pred_centered, K_pred_centered2) def test_fit_transform(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for obj in ((StandardScaler(), Normalizer(), Binarizer())): X_transformed = obj.fit(X).transform(X) X_transformed2 = obj.fit_transform(X) assert_array_equal(X_transformed, X_transformed2) def test_add_dummy_feature(): X = [[1, 0], [0, 1], [0, 1]] X = add_dummy_feature(X) assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_coo(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csc(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csr(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_one_hot_encoder_sparse(): # Test OneHotEncoder's fit and transform. 
X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder() # discover max values automatically X_trans = enc.fit_transform(X).toarray() assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, [[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]]) # max value given as 3 enc = OneHotEncoder(n_values=4) X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 4 * 3)) assert_array_equal(enc.feature_indices_, [0, 4, 8, 12]) # max value given per feature enc = OneHotEncoder(n_values=[3, 2, 2]) X = [[1, 0, 1], [0, 1, 1]] X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 3 + 2 + 2)) assert_array_equal(enc.n_values_, [3, 2, 2]) # check that testing with larger feature works: X = np.array([[2, 0, 1], [0, 1, 1]]) enc.transform(X) # test that an error is raised when out of bounds: X_too_large = [[0, 2, 1], [0, 1, 1]] assert_raises(ValueError, enc.transform, X_too_large) assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X) # test that error is raised when wrong number of features assert_raises(ValueError, enc.transform, X[:, :-1]) # test that error is raised when wrong number of features in fit # with prespecified n_values assert_raises(ValueError, enc.fit, X[:, :-1]) # test exception on wrong init param assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X) enc = OneHotEncoder() # test negative input to fit assert_raises(ValueError, enc.fit, [[0], [-1]]) # test negative input to transform enc.fit([[0], [1]]) assert_raises(ValueError, enc.transform, [[0], [-1]]) def test_one_hot_encoder_dense(): # check for sparse=False X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder(sparse=False) # discover max values automatically X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, np.array([[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]])) def _check_transform_selected(X, X_expected, sel): for M in (X, sparse.csr_matrix(X)): Xtr = _transform_selected(M, Binarizer().transform, sel) assert_array_equal(toarray(Xtr), X_expected) def test_transform_selected(): X = [[3, 2, 1], [0, 1, 1]] X_expected = [[1, 2, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0]) _check_transform_selected(X, X_expected, [True, False, False]) X_expected = [[1, 1, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0, 1, 2]) _check_transform_selected(X, X_expected, [True, True, True]) _check_transform_selected(X, X_expected, "all") _check_transform_selected(X, X, []) _check_transform_selected(X, X, [False, False, False]) def _run_one_hot(X, X2, cat): enc = OneHotEncoder(categorical_features=cat) Xtr = enc.fit_transform(X) X2tr = enc.transform(X2) return Xtr, X2tr def _check_one_hot(X, X2, cat, n_features): ind = np.where(cat)[0] # With mask A, B = _run_one_hot(X, X2, cat) # With indices C, D = _run_one_hot(X, X2, ind) # Check shape assert_equal(A.shape, (2, n_features)) assert_equal(B.shape, (1, n_features)) assert_equal(C.shape, (2, n_features)) assert_equal(D.shape, (1, n_features)) # Check that mask and indices give the same results assert_array_equal(toarray(A), toarray(C)) assert_array_equal(toarray(B), toarray(D)) def test_one_hot_encoder_categorical_features(): X = np.array([[3, 2, 1], [0, 1, 1]]) X2 = np.array([[1, 1, 1]]) cat = [True, False, False] 
_check_one_hot(X, X2, cat, 4) # Edge case: all non-categorical cat = [False, False, False] _check_one_hot(X, X2, cat, 3) # Edge case: all categorical cat = [True, True, True] _check_one_hot(X, X2, cat, 5) def test_one_hot_encoder_unknown_transform(): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) y = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. oh = OneHotEncoder(handle_unknown='error') oh.fit(X) assert_raises(ValueError, oh.transform, y) # Test the ignore option, ignores unknown features. oh = OneHotEncoder(handle_unknown='ignore') oh.fit(X) assert_array_equal( oh.transform(y).toarray(), np.array([[0., 0., 0., 0., 1., 0., 0.]]) ) # Raise error if handle_unknown is neither ignore or error. oh = OneHotEncoder(handle_unknown='42') oh.fit(X) assert_raises(ValueError, oh.transform, y)
bsd-3-clause
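The scaling tests in the row above lean on two behaviours worth seeing in isolation. A minimal sketch of my own (not part of the dataset row) of the sparse-versus-dense contract of StandardScaler and the feature_range handling of MinMaxScaler:

import numpy as np
from scipy import sparse
from sklearn.preprocessing import StandardScaler, MinMaxScaler

X = np.random.RandomState(0).randn(4, 5)
X[:, 0] = 0.0                                  # zero-variance feature, as in the tests
X_csr = sparse.csr_matrix(X)

# Centering sparse input is refused; scaling alone (with_mean=False) is allowed.
scaler = StandardScaler(with_mean=False).fit(X_csr)
X_scaled = scaler.transform(X_csr)
print(X_scaled.toarray().std(axis=0))          # approx. [0., 1., 1., 1., 1.]

# MinMaxScaler maps each non-constant feature into the requested range.
mm = MinMaxScaler(feature_range=(1, 2)).fit(X)
print(mm.transform(X).min(axis=0), mm.transform(X).max(axis=0))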
DonBeo/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
248
1095
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-', label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-', label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), 'm-', label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-', label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-', label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), 'y--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
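Every curve in the example above is a function of the margin z = y * f(x). A small spot-check (added here, not from the repository) of the same formulas at a few margins makes the piecewise definitions concrete:

import numpy as np

z = np.array([-2.0, 0.0, 0.5, 2.0])            # margins y * f(x)
hinge = np.where(z < 1, 1 - z, 0.0)
perceptron = -np.minimum(z, 0.0)
log_loss = np.log2(1 + np.exp(-z))
squared_hinge = np.where(z < 1, 1 - z, 0.0) ** 2
modified_huber = np.where(z >= 1, 0.0,
                          np.where(z >= -1, (1 - z) ** 2, -4.0 * z))
for name, loss in [("hinge", hinge), ("perceptron", perceptron),
                   ("log", log_loss), ("squared hinge", squared_hinge),
                   ("modified Huber", modified_huber)]:
    print(name, loss)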
arabenjamin/scikit-learn
examples/decomposition/plot_incremental_pca.py
243
1878
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ print(__doc__) # Authors: Kyle Kastner # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names): plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1], c=c, label=target_name) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error " "%.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best") plt.axis([-4, 4, -1.5, 1.5]) plt.show()
bsd-3-clause
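The example above hands the whole iris matrix to fit_transform and lets batch_size drive the chunking. An equivalent hedged sketch (the chunk count is chosen arbitrarily) using partial_fit shows the out-of-core pattern more explicitly:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import IncrementalPCA

X = load_iris().data
ipca = IncrementalPCA(n_components=2)
for chunk in np.array_split(X, 15):   # pretend each chunk is all that fits in memory
    ipca.partial_fit(chunk)           # updates the components incrementally
print(ipca.transform(X).shape)        # (150, 2)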
ChadFulton/statsmodels
statsmodels/base/tests/test_transform.py
7
4469
import numpy as np from numpy.testing import (assert_almost_equal, assert_equal, assert_raises) from statsmodels.base.transform import (BoxCox) from statsmodels.datasets import macrodata class TestTransform: @classmethod def setup_class(cls): data = macrodata.load_pandas() cls.x = data.data['realgdp'].values cls.bc = BoxCox() def test_nonpositive(self): # Testing negative values y = [1, -1, 1] assert_raises(ValueError, self.bc.transform_boxcox, y) # Testing nonzero y = [1, 0, 1] assert_raises(ValueError, self.bc.transform_boxcox, y) def test_invalid_bounds(self): # more than two bounds assert_raises(ValueError, self.bc._est_lambda, self.x, (-3, 2, 3)) # upper bound <= lower bound assert_raises(ValueError, self.bc._est_lambda, self.x, (2, -1)) def test_unclear_methods(self): # Both _est_lambda and untransform have a method argument that should # be tested. assert_raises(ValueError, self.bc._est_lambda, self.x, (-1, 2), 'test') assert_raises(ValueError, self.bc.untransform_boxcox, self.x, 1, 'test') def test_unclear_scale_parameter(self): # bc.guerrero allows for 'mad' and 'sd', for the MAD and Standard # Deviation, respectively assert_raises(ValueError, self.bc._est_lambda, self.x, scale='test') # Next, check if mad/sd work: self.bc._est_lambda(self.x, scale='mad') self.bc._est_lambda(self.x, scale='MAD') self.bc._est_lambda(self.x, scale='sd') self.bc._est_lambda(self.x, scale='SD') def test_valid_guerrero(self): # `l <- BoxCox.lambda(x, method="guerrero")` on a ts object # with frequency 4 (BoxCox.lambda defaults to 2, but we use # Guerrero and Perera (2004) as a guideline) lmbda = self.bc._est_lambda(self.x, method='guerrero', window_length=4) assert_almost_equal(lmbda, 0.507624, 4) # `l <- BoxCox.lambda(x, method="guerrero")` with the default grouping # parameter (namely, window_length=2). lmbda = self.bc._est_lambda(self.x, method='guerrero', window_length=2) assert_almost_equal(lmbda, 0.513893, 4) def test_guerrero_robust_scale(self): # The lambda is derived from a manual check of the values for the MAD. # Compare also the result for the standard deviation on R=4: 0.5076, # i.e. almost the same value. lmbda = self.bc._est_lambda(self.x, scale='mad') assert_almost_equal(lmbda, 0.488621, 4) def test_loglik_lambda_estimation(self): # 0.2 is the value returned by `BoxCox.lambda(x, method="loglik")` lmbda = self.bc._est_lambda(self.x, method='loglik') assert_almost_equal(lmbda, 0.2, 1) def test_boxcox_transformation_methods(self): # testing estimated lambda vs. provided. Should result in almost # the same transformed data. Value taken from R. y_transformed_no_lambda = self.bc.transform_boxcox(self.x) y_transformed_lambda = self.bc.transform_boxcox(self.x, 0.507624) assert_almost_equal(y_transformed_no_lambda[0], y_transformed_lambda[0], 3) # a perfectly increasing set has a constant variance over the entire # series, hence stabilising should result in the same scale: lmbda = 1. y, lmbda = self.bc.transform_boxcox(np.arange(1, 100)) assert_almost_equal(lmbda, 1., 5) def test_zero_lambda(self): # zero lambda should be a log transform. y_transform_zero_lambda, lmbda = self.bc.transform_boxcox(self.x, 0.) assert_equal(lmbda, 0.) assert_almost_equal(y_transform_zero_lambda, np.log(self.x), 5) def test_naive_back_transformation(self): # test both transformations functions -> 0. and .5 y_zero_lambda = self.bc.transform_boxcox(self.x, 0.) 
y_half_lambda = self.bc.transform_boxcox(self.x, .5) y_zero_lambda_un = self.bc.untransform_boxcox(*y_zero_lambda, method='naive') y_half_lambda_un = self.bc.untransform_boxcox(*y_half_lambda, method='naive') assert_almost_equal(self.x, y_zero_lambda_un, 5) assert_almost_equal(self.x, y_half_lambda_un, 5)
bsd-3-clause
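For reference, the transform these tests exercise is y(lmbda) = (y**lmbda - 1) / lmbda for lmbda != 0 and log(y) at lmbda = 0. A plain-NumPy sketch of that definition and its naive inverse (not the statsmodels BoxCox class itself):

import numpy as np

def boxcox(y, lmbda):
    y = np.asarray(y, dtype=float)
    return np.log(y) if lmbda == 0 else (y ** lmbda - 1.0) / lmbda

def boxcox_inverse(z, lmbda):
    z = np.asarray(z, dtype=float)
    return np.exp(z) if lmbda == 0 else (lmbda * z + 1.0) ** (1.0 / lmbda)

y = np.array([1.0, 2.0, 4.0, 8.0])
print(np.allclose(boxcox(y, 0.0), np.log(y)))               # True: zero lambda is a log transform
print(np.allclose(boxcox_inverse(boxcox(y, 0.5), 0.5), y))  # True: naive back-transform round-trips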
JuliaSprenger/python-neo
neo/test/rawiotest/common_rawio_test.py
5
3793
''' Common tests for RawIOs: It is copy/paste from neo/test/iotests/common_io_test.py The code should be shared for common parts. The public URL is in url_for_tests. To deposite new testing files, please create a account at gin.g-node.org and upload files at NeuralEnsemble/ephy_testing_data data repo. ''' __test__ = False import logging import unittest from neo.utils.datasets import (download_dataset, get_local_testing_data_folder, default_testing_repo) from neo.test.rawiotest.tools import can_use_network from neo.test.rawiotest import rawio_compliance as compliance try: import datalad HAVE_DATALAD = True except: HAVE_DATALAD = False # url_for_tests = "https://portal.g-node.org/neo/" #This is the old place repo_for_test = default_testing_repo class BaseTestRawIO: ''' This class make common tests for all IOs. Basically download files from G-node portal. And test the IO is working. ''' # ~ __test__ = False # all IO test need to modify this: rawioclass = None # the IOclass to be tested entities_to_test = [] # list of files to test compliances entities_to_download = [] # when files are at gin # allow environment to tell avoid using network use_network = can_use_network() local_test_dir = get_local_testing_data_folder() def setUp(self): ''' Set up the test fixture. This is run for every test ''' self.shortname = self.rawioclass.__name__.lower().replace('rawio', '') if HAVE_DATALAD: for remote_path in self.entities_to_download: download_dataset(repo=repo_for_test, remote_path=remote_path) else: raise unittest.SkipTest("Requires datalad download of data from the web") def get_local_base_folder(self): return get_local_testing_data_folder() def get_local_path(self, sub_path): root_local_path = self.get_local_base_folder() local_path = root_local_path / sub_path # TODO later : remove the str when all IOs handle the pathlib.Path objects local_path = str(local_path) return local_path def test_read_all(self): # Read all file in self.entities_to_test if not HAVE_DATALAD: return for entity_name in self.entities_to_test: # entity_name = self.get_filename_path(entity_name) # local path is a folder or a file local_path = self.get_local_path(entity_name) if self.rawioclass.rawmode.endswith('-file'): reader = self.rawioclass(filename=local_path) elif self.rawioclass.rawmode.endswith('-dir'): reader = self.rawioclass(dirname=local_path) txt = reader.__repr__() assert 'nb_block' not in txt, 'Before parser_header() nb_block should be NOT known' reader.parse_header() txt = reader.__repr__() assert 'nb_block' in txt, 'After parser_header() nb_block should be known' # print(txt) # txt = reader._repr_annotations() # reader.print_annotations() # lanch a series of test compliance compliance.header_is_total(reader) compliance.count_element(reader) compliance.read_analogsignals(reader) compliance.read_spike_times(reader) compliance.read_spike_waveforms(reader) compliance.read_events(reader) compliance.has_annotations(reader) # basic benchmark level = logging.getLogger().getEffectiveLevel() logging.getLogger().setLevel(logging.INFO) compliance.benchmark_speed_read_signals(reader) logging.getLogger().setLevel(level)
bsd-3-clause
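A hypothetical concrete subclass (the class name, import path and entity list are invented for illustration; only the class attributes come from the base class above) would plug an IO class into this harness roughly like so:

import unittest
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
from neo.rawio.examplerawio import ExampleRawIO   # assumption: the example IO shipped with neo

class TestExampleRawIO(BaseTestRawIO, unittest.TestCase):
    rawioclass = ExampleRawIO
    entities_to_download = []                 # nothing to pull from the gin data repo
    entities_to_test = ['example/fake.fake']  # hypothetical local entity path

if __name__ == '__main__':
    unittest.main()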
joeshaw/luigi
test/contrib/bigquery_test.py
21
4218
# -*- coding: utf-8 -*- # # Copyright 2015 Twitter Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """This is an integration test for the Bigquery-luigi binding. This test requires credentials that can access GCS & access to a bucket below. Follow the directions in the gcloud tools to set up local credentials. """ import json import os import luigi from luigi.contrib import bigquery from contrib import gcs_test from nose.plugins.attrib import attr PROJECT_ID = gcs_test.PROJECT_ID DATASET_ID = os.environ.get('BQ_TEST_DATASET_ID', 'luigi_tests') @attr('gcloud') class TestLoadTask(bigquery.BigqueryLoadTask): _BIGQUERY_CLIENT = None source = luigi.Parameter() table = luigi.Parameter() @property def schema(self): return [ {'mode': 'NULLABLE', 'name': 'field1', 'type': 'STRING'}, {'mode': 'NULLABLE', 'name': 'field2', 'type': 'INTEGER'}, ] def source_uris(self): return [self.source] def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self._BIGQUERY_CLIENT) @attr('gcloud') class TestRunQueryTask(bigquery.BigqueryRunQueryTask): _BIGQUERY_CLIENT = None query = ''' SELECT 'hello' as field1, 2 as field2 ''' table = luigi.Parameter() def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self._BIGQUERY_CLIENT) @attr('gcloud') class BigqueryTest(gcs_test._GCSBaseTestCase): def setUp(self): super(BigqueryTest, self).setUp() self.bq_client = bigquery.BigqueryClient(gcs_test.CREDENTIALS) self.table = bigquery.BQTable(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=self.id().split('.')[-1]) self.addCleanup(self.bq_client.delete_table, self.table) def create_dataset(self, data=[]): self.bq_client.delete_table(self.table) text = '\n'.join(map(json.dumps, data)) gcs_file = gcs_test.bucket_url(self.id()) self.client.put_string(text, gcs_file) task = TestLoadTask(source=gcs_file, table=self.table.table_id) task._BIGQUERY_CLIENT = self.bq_client task.run() def test_table_uri(self): intended_uri = "bq://" + PROJECT_ID + "/" + \ DATASET_ID + "/" + self.table.table_id self.assertTrue(self.table.uri == intended_uri) def test_load_and_copy(self): self.create_dataset([ {'field1': 'hi', 'field2': 1}, {'field1': 'bye', 'field2': 2}, ]) # Cram some stuff in here to make the tests run faster - loading data takes a while! 
self.assertTrue(self.bq_client.dataset_exists(self.table)) self.assertTrue(self.bq_client.table_exists(self.table)) self.assertIn(self.table.dataset_id, list(self.bq_client.list_datasets(self.table.project_id))) self.assertIn(self.table.table_id, list(self.bq_client.list_tables(self.table.dataset))) new_table = self.table._replace(table_id=self.table.table_id + '_copy') self.bq_client.copy( source_table=self.table, dest_table=new_table ) self.assertTrue(self.bq_client.table_exists(new_table)) self.bq_client.delete_table(new_table) self.assertFalse(self.bq_client.table_exists(new_table)) def test_run_query(self): task = TestRunQueryTask(table=self.table.table_id) task._BIGQUERY_CLIENT = self.bq_client task.run() self.assertTrue(self.bq_client.table_exists(self.table))
apache-2.0
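Outside the nose/GCP test harness, the same load task could be kicked off with luigi's local scheduler. A hedged sketch (the bucket, file and table names are placeholders, and credentials are assumed to be configured as the module docstring describes):

import luigi
from contrib.bigquery_test import TestLoadTask   # the task class defined above

if __name__ == '__main__':
    luigi.build(
        [TestLoadTask(source='gs://my-test-bucket/rows.json',  # hypothetical newline-delimited JSON
                      table='manual_smoke_test')],             # hypothetical table id
        local_scheduler=True)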
ChadFulton/statsmodels
statsmodels/graphics/tests/test_functional.py
1
7687
from statsmodels.compat.python import range import numpy as np from numpy.testing import assert_equal, assert_almost_equal import pytest from statsmodels.datasets import elnino from statsmodels.graphics.functional import \ hdrboxplot, banddepth, fboxplot, rainbowplot try: import matplotlib.pyplot as plt except ImportError: pass data = elnino.load(as_pandas=False) labels = data.raw_data[:, 0].astype(int) data = data.raw_data[:, 1:] @pytest.mark.matplotlib def test_hdr_basic(close_figures): _, hdr = hdrboxplot(data, labels=labels, seed=12345) assert len(hdr.extra_quantiles) == 0 median_t = [24.247, 25.625, 25.964, 24.999, 23.648, 22.302, 21.231, 20.366, 20.168, 20.434, 21.111, 22.299] assert_almost_equal(hdr.median, median_t, decimal=2) quant = np.vstack([hdr.outliers, hdr.hdr_90, hdr.hdr_50]) quant_t = np.vstack([[24.36, 25.42, 25.40, 24.96, 24.21, 23.35, 22.50, 21.89, 22.04, 22.88, 24.57, 25.89], [27.25, 28.23, 28.85, 28.82, 28.37, 27.43, 25.73, 23.88, 22.26, 22.22, 22.21, 23.19], [23.70, 26.08, 27.17, 26.74, 26.77, 26.15, 25.59, 24.95, 24.69, 24.64, 25.85, 27.08], [28.12, 28.82, 29.24, 28.45, 27.36, 25.19, 23.61, 22.27, 21.31, 21.37, 21.60, 22.81], [25.48, 26.99, 27.51, 27.04, 26.23, 24.94, 23.69, 22.72, 22.26, 22.64, 23.33, 24.44], [23.11, 24.50, 24.66, 23.44, 21.74, 20.58, 19.68, 18.84, 18.76, 18.99, 19.66, 20.86], [24.84, 26.23, 26.67, 25.93, 24.87, 23.57, 22.46, 21.45, 21.26, 21.57, 22.14, 23.41], [23.62, 25.10, 25.34, 24.22, 22.74, 21.52, 20.40, 19.56, 19.63, 19.67, 20.37, 21.76]]) assert_almost_equal(quant, quant_t, decimal=0) labels_pos = np.all(np.in1d(data, hdr.outliers).reshape(data.shape), axis=1) outliers = labels[labels_pos] assert_equal([1982, 1983, 1997, 1998], outliers) assert_equal(labels[hdr.outliers_idx], outliers) @pytest.mark.matplotlib def test_hdr_basic_brute(close_figures): _, hdr = hdrboxplot(data, labels=labels, use_brute=True) assert len(hdr.extra_quantiles) == 0 median_t = [24.247, 25.625, 25.964, 24.999, 23.648, 22.302, 21.231, 20.366, 20.168, 20.434, 21.111, 22.299] assert_almost_equal(hdr.median, median_t, decimal=2) @pytest.mark.matplotlib def test_hdr_plot(close_figures): fig = plt.figure() ax = fig.add_subplot(111) hdrboxplot(data, labels=labels.tolist(), ax=ax, threshold=1, seed=12345) ax.set_xlabel("Month of the year") ax.set_ylabel("Sea surface temperature (C)") ax.set_xticks(np.arange(13, step=3) - 1) ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"]) ax.set_xlim([-0.2, 11.2]) @pytest.mark.matplotlib def test_hdr_alpha(close_figures): _, hdr = hdrboxplot(data, alpha=[0.7], seed=12345) extra_quant_t = np.vstack([[25.1, 26.5, 27.0, 26.4, 25.4, 24.1, 23.0, 22.0, 21.7, 22.1, 22.7, 23.8], [23.4, 24.8, 25.0, 23.9, 22.4, 21.1, 20.0, 19.3, 19.2, 19.4, 20.1, 21.3]]) assert_almost_equal(hdr.extra_quantiles, extra_quant_t, decimal=0) @pytest.mark.matplotlib def test_hdr_multiple_alpha(close_figures): _, hdr = hdrboxplot(data, alpha=[0.4, 0.92], seed=12345) extra_quant_t = [[25.712, 27.052, 27.711, 27.200, 26.162, 24.833, 23.639, 22.378, 22.250, 22.640, 23.472, 24.649], [22.973, 24.526, 24.608, 23.343, 21.908, 20.655, 19.750, 19.046, 18.812, 18.989, 19.520, 20.685], [24.667, 26.033, 26.416, 25.584, 24.308, 22.849, 21.684, 20.948, 20.483, 21.019, 21.751, 22.890], [23.873, 25.371, 25.667, 24.644, 23.177, 21.923, 20.791, 20.015, 19.697, 19.951, 20.622, 21.858]] assert_almost_equal(hdr.extra_quantiles, np.vstack(extra_quant_t), decimal=0) @pytest.mark.matplotlib def test_hdr_threshold(close_figures): _, hdr = hdrboxplot(data, alpha=[0.8], threshold=0.93, 
seed=12345) labels_pos = np.all(np.in1d(data, hdr.outliers).reshape(data.shape), axis=1) outliers = labels[labels_pos] assert_equal([1968, 1982, 1983, 1997, 1998], outliers) @pytest.mark.matplotlib def test_hdr_bw(close_figures): _, hdr = hdrboxplot(data, bw='cv_ml', seed=12345) median_t = [24.25, 25.64, 25.99, 25.04, 23.71, 22.38, 21.31, 20.44, 20.24, 20.51, 21.19, 22.38] assert_almost_equal(hdr.median, median_t, decimal=2) @pytest.mark.matplotlib def test_hdr_ncomp(close_figures): _, hdr = hdrboxplot(data, ncomp=3, seed=12345) median_t = [24.33, 25.71, 26.04, 25.08, 23.74, 22.40, 21.32, 20.45, 20.25, 20.53, 21.20, 22.39] assert_almost_equal(hdr.median, median_t, decimal=2) def test_banddepth_BD2(): xx = np.arange(500) / 150. y1 = 1 + 0.5 * np.sin(xx) y2 = 0.3 + np.sin(xx + np.pi/6) y3 = -0.5 + np.sin(xx + np.pi/6) y4 = -1 + 0.3 * np.cos(xx + np.pi/6) data = np.asarray([y1, y2, y3, y4]) depth = banddepth(data, method='BD2') expected_depth = [0.5, 5./6, 5./6, 0.5] assert_almost_equal(depth, expected_depth) # Plot to visualize why we expect this output # fig = plt.figure() # ax = fig.add_subplot(111) # for ii, yy in enumerate([y1, y2, y3, y4]): # ax.plot(xx, yy, label="y%s" % ii) # ax.legend() # plt.close(fig) def test_banddepth_MBD(): xx = np.arange(5001) / 5000. y1 = np.zeros(xx.shape) y2 = 2 * xx - 1 y3 = np.ones(xx.shape) * 0.5 y4 = np.ones(xx.shape) * -0.25 data = np.asarray([y1, y2, y3, y4]) depth = banddepth(data, method='MBD') expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6] assert_almost_equal(depth, expected_depth, decimal=4) @pytest.mark.matplotlib def test_fboxplot_rainbowplot(close_figures): # Test fboxplot and rainbowplot together, is much faster. def harmfunc(t): """Test function, combination of a few harmonic terms.""" # Constant, 0 with p=0.9, 1 with p=1 - for creating outliers ci = int(np.random.random() > 0.9) a1i = np.random.random() * 0.05 a2i = np.random.random() * 0.05 b1i = (0.15 - 0.1) * np.random.random() + 0.1 b2i = (0.15 - 0.1) * np.random.random() + 0.1 func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \ ci * (b1i * np.sin(t) + b2i * np.cos(t)) return func np.random.seed(1234567) # Some basic test data, Model 6 from Sun and Genton. t = np.linspace(0, 2 * np.pi, 250) data = [harmfunc(t) for _ in range(20)] # fboxplot test fig = plt.figure() ax = fig.add_subplot(111) _, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax) ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2, 12, 17, 11, 14, 10, 0, 18]) assert_equal(ix_depth, ix_expected) ix_expected2 = np.array([2, 11, 17, 18]) assert_equal(ix_outliers, ix_expected2) # rainbowplot test (re-uses depth variable) xdata = np.arange(data[0].size) fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
bsd-3-clause
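A minimal usage sketch of the band-depth helper these tests call, on toy sine curves instead of the El Nino series used above:

import numpy as np
from statsmodels.graphics.functional import banddepth

t = np.linspace(0, 2 * np.pi, 100)
curves = np.asarray([np.sin(t) + shift for shift in (-1.0, -0.25, 0.25, 1.0)])
print(banddepth(curves, method='MBD'))   # the two central curves get the larger depths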
iarroyof/sentence_embedding
deprecated/document_classification.py
1
11099
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features and demonstrates various classifiers that can efficiently handle sparse matrices. The dataset used in this example is the 20 newsgroups dataset. It will be automatically downloaded, then cached. The bar plot indicates the accuracy, training time (normalized) and test time (normalized) of each classifier. """ # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # License: BSD 3 clause from __future__ import print_function import logging import numpy as np from optparse import OptionParser import sys from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_selection import SelectFromModel from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import RidgeClassifier from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.linear_model import Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import NearestCentroid from sklearn.ensemble import RandomForestClassifier from sklearn.utils.extmath import density from sklearn import metrics from sklearn.datasets import load_files # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--report", action="store_true", dest="print_report", help="Print a detailed classification report.") op.add_option("--chi2_select", action="store", type="int", dest="select_chi2", help="Select some number of features using a chi-squared test") op.add_option("--confusion_matrix", action="store_true", dest="print_cm", help="Print the confusion matrix.") op.add_option("--top10", action="store_true", dest="print_top10", help="Print ten most discriminative terms per class" " for every classifier.") op.add_option("--all_categories", action="store_true", dest="all_categories", help="Whether to use all categories or not.") op.add_option("--use_hashing", action="store_true", help="Use a hashing vectorizer.") op.add_option("--n_features", action="store", type=int, default=2 ** 16, help="n_features when using the hashing vectorizer.") op.add_option("--filtered", action="store_true", help="Remove newsgroup information that is easily overfit: " "headers, signatures, and quoting.") op.add_option("--data", action="store_true", help="Dataset directory. If '20news' specified then load it. 
") def is_interactive(): return not hasattr(sys.modules['__main__'], '__file__') # work-around for Jupyter notebook and IPython console argv = [] if is_interactive() else sys.argv[1:] (opts, args) = op.parse_args(argv) if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) print(__doc__) op.print_help() print() # ############################################################################# # Load some categories from the training set if opts.data == "20news": if opts.all_categories: categories = None else: categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] if opts.filtered: remove = ('headers', 'footers', 'quotes') else: remove = () if opts.data == "20news": print("Loading dataset for categories:") print(categories if categories else "all") data_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42, remove=remove) else: load_files = print('data loaded') # order of labels in `target_names` can be different from `categories` target_names = data_train.target_names def size_mb(docs): return sum(len(s.encode('utf-8')) for s in docs) / 1e6 data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print("%d documents - %0.3fMB (training set)" % ( len(data_train.data), data_train_size_mb)) print("%d documents - %0.3fMB (test set)" % ( len(data_test.data), data_test_size_mb)) print("%d categories" % len(categories)) print() # split a training set and a test set y_train, y_test = data_train.target, data_test.target print("Extracting features from the training data using a sparse vectorizer") t0 = time() if opts.use_hashing: vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False, n_features=opts.n_features) X_train = vectorizer.transform(data_train.data) else: vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') X_train = vectorizer.fit_transform(data_train.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_train.shape) print() print("Extracting features from the test data using the same vectorizer") t0 = time() X_test = vectorizer.transform(data_test.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_test.shape) print() # mapping from integer feature name to original token string if opts.use_hashing: feature_names = None else: feature_names = vectorizer.get_feature_names() if opts.select_chi2: print("Extracting %d best features by a chi-squared test" % opts.select_chi2) t0 = time() ch2 = SelectKBest(chi2, k=opts.select_chi2) X_train = ch2.fit_transform(X_train, y_train) X_test = ch2.transform(X_test) if feature_names: # keep selected feature names feature_names = [feature_names[i] for i in ch2.get_support(indices=True)] print("done in %fs" % (time() - t0)) print() if feature_names: feature_names = np.asarray(feature_names) def trim(s): """Trim string to fit on terminal (assuming 80-column display)""" return s if len(s) <= 80 else s[:77] + "..." 
# ############################################################################# # Benchmark classifiers def benchmark(clf): print('_' * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print("train time: %0.3fs" % train_time) t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print("test time: %0.3fs" % test_time) score = metrics.accuracy_score(y_test, pred) print("accuracy: %0.3f" % score) if hasattr(clf, 'coef_'): print("dimensionality: %d" % clf.coef_.shape[1]) print("density: %f" % density(clf.coef_)) if opts.print_top10 and feature_names is not None: print("top 10 keywords per class:") for i, label in enumerate(target_names): top10 = np.argsort(clf.coef_[i])[-10:] print(trim("%s: %s" % (label, " ".join(feature_names[top10])))) print() if opts.print_report: print("classification report:") print(metrics.classification_report(y_test, pred, target_names=target_names)) if opts.print_cm: print("confusion matrix:") print(metrics.confusion_matrix(y_test, pred)) print() clf_descr = str(clf).split('(')[0] return clf_descr, score, train_time, test_time results = [] for clf, name in ( (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"), (Perceptron(n_iter=50), "Perceptron"), (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"), (KNeighborsClassifier(n_neighbors=10), "kNN"), (RandomForestClassifier(n_estimators=100), "Random forest")): print('=' * 80) print(name) results.append(benchmark(clf)) for penalty in ["l2", "l1"]: print('=' * 80) print("%s penalty" % penalty.upper()) # Train Liblinear model results.append(benchmark(LinearSVC(penalty=penalty, dual=False, tol=1e-3))) # Train SGD model results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty))) # Train SGD with Elastic Net penalty print('=' * 80) print("Elastic-Net penalty") results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"))) # Train NearestCentroid without threshold print('=' * 80) print("NearestCentroid (aka Rocchio classifier)") results.append(benchmark(NearestCentroid())) # Train sparse Naive Bayes classifiers print('=' * 80) print("Naive Bayes") results.append(benchmark(MultinomialNB(alpha=.01))) results.append(benchmark(BernoulliNB(alpha=.01))) print('=' * 80) print("LinearSVC with L1-based feature selection") # The smaller C, the stronger the regularization. # The more regularization, the more sparsity. results.append(benchmark(Pipeline([ ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False, tol=1e-3))), ('classification', LinearSVC(penalty="l2"))]))) # make some plots indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) / np.max(training_time) test_time = np.array(test_time) / np.max(test_time) plt.figure(figsize=(12, 8)) plt.title("Score") plt.barh(indices, score, .2, label="score", color='navy') plt.barh(indices + .3, training_time, .2, label="training time", color='c') plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange') plt.yticks(()) plt.legend(loc='best') plt.subplots_adjust(left=.25) plt.subplots_adjust(top=.95) plt.subplots_adjust(bottom=.05) for i, c in zip(indices, clf_names): plt.text(-.3, i, c) plt.show()
apache-2.0
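Stripped of the option parsing, benchmarking loop and plotting, the core of what each run in the script above does reduces to a few lines. A condensed sketch on the same four-category 20 newsgroups subset (the accuracy value is not asserted here):

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn import metrics

categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)

vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(train.data)
X_test = vectorizer.transform(test.data)

clf = LinearSVC(dual=False, tol=1e-3).fit(X_train, train.target)
print(metrics.accuracy_score(test.target, clf.predict(X_test)))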
tensorflow/tensorflow-experimental_link_static_libraries_once
tensorflow/python/data/kernel_tests/zip_test.py
5
10630
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset.zip()`.""" import collections from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import random_access from tensorflow.python.data.kernel_tests import checkpoint_test_base from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import combinations from tensorflow.python.framework import errors from tensorflow.python.framework import tensor_shape from tensorflow.python.platform import test try: import attr # pylint:disable=g-import-not-at-top except ImportError: attr = None def _dataset_factory(components): datasets = tuple([ dataset_ops.Dataset.from_tensor_slices(component) for component in components ]) return dataset_ops.Dataset.zip(datasets) class ZipTest(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate(test_base.default_test_combinations()) def testZipEqual(self): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array([37.0, 38.0, 39.0, 40.0]) ] get_next = self.getNext(_dataset_factory(components)) for i in range(4): results = self.evaluate(get_next()) for component, result_component in zip(components, results): self.assertAllEqual(component[i], result_component) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @combinations.generate(test_base.default_test_combinations()) def testZipUnequal(self): components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]] get_next = self.getNext(_dataset_factory(components)) for i in range(2): results = self.evaluate(get_next()) for component, result_component in zip(components, results): self.assertAllEqual(component[i], result_component) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @combinations.generate(test_base.default_test_combinations()) def testNested(self): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array([37.0, 38.0, 39.0, 40.0]) ] datasets = [ dataset_ops.Dataset.from_tensor_slices(component) for component in components ] dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2]))) self.assertEqual( dataset_ops.get_legacy_output_shapes(dataset), (tensor_shape.TensorShape([20]), (tensor_shape.TensorShape([22]), tensor_shape.TensorShape([])))) get_next = self.getNext(dataset) for i in range(4): result1, (result2, result3) = self.evaluate(get_next()) self.assertAllEqual(components[0][i], result1) self.assertAllEqual(components[1][i], result2) self.assertAllEqual(components[2][i], result3) with self.assertRaises(errors.OutOfRangeError): 
self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @combinations.generate(test_base.default_test_combinations()) def testNamedTuple(self): Foo = collections.namedtuple("Foo", ["x", "y"]) x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6)) dataset = dataset_ops.Dataset.zip(x) expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)] self.assertDatasetProduces(dataset, expected) @combinations.generate(test_base.default_test_combinations()) def testAttrs(self): if attr is None: self.skipTest("attr module is not available.") @attr.s class Foo: x = attr.ib() y = attr.ib() x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6)) dataset = dataset_ops.Dataset.zip(x) expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)] self.assertDatasetProduces(dataset, expected) @combinations.generate(test_base.default_test_combinations()) def testName(self): x = dataset_ops.Dataset.from_tensors(4) y = dataset_ops.Dataset.from_tensors(2) dataset = dataset_ops.Dataset.zip((x, y), name="zip") self.assertDatasetProduces(dataset, [(4, 2)]) class ZipCheckpointTest(checkpoint_test_base.CheckpointTestBase, parameterized.TestCase): def _build_dataset(self, arr): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array(arr) ] datasets = [ dataset_ops.Dataset.from_tensor_slices(component) for component in components ] return dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2]))) @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(elements=[[37.0, 38.0, 39.0, 40.0], [1.0, 2.0]])) ) def test(self, verify_fn, elements): verify_fn(self, lambda: self._build_dataset(elements), len(elements)) class ZipRandomAccessTest(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate( combinations.times(test_base.default_test_combinations(), combinations.combine(index=[-1, 3, 4]))) def testInvalidIndex(self, index): dataset = dataset_ops.Dataset.zip( (dataset_ops.Dataset.range(1, 4), dataset_ops.Dataset.range(4, 7))) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=index)) @combinations.generate( combinations.times(test_base.default_test_combinations(), combinations.combine(index=[-1, 0]))) def testEmptyDataset(self, index): dataset = dataset_ops.Dataset.zip( datasets=(dataset_ops.Dataset.from_tensor_slices([]), dataset_ops.Dataset.from_tensor_slices([]))) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=index)) @combinations.generate( combinations.times(test_base.default_test_combinations())) def testZipBasic(self): dataset = dataset_ops.Dataset.zip( (dataset_ops.Dataset.range(1, 4), dataset_ops.Dataset.range(4, 7))) expected_dataset = [(1, 4), (2, 5), (3, 6)] for i in range(3): self.assertEqual( self.evaluate(random_access.at(dataset, index=i)), expected_dataset[i]) @combinations.generate( combinations.times(test_base.default_test_combinations())) def testZipEqual(self): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array([37.0, 38.0, 39.0, 40.0]) ] dataset = _dataset_factory(components) for i in range(4): results = self.evaluate(random_access.at(dataset, index=i)) for component, result_component in zip(components, results): self.assertAllEqual(component[i], result_component) with 
self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=4)) @combinations.generate(test_base.default_test_combinations()) def testZipUnequal(self): components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]] dataset = _dataset_factory(components) for i in range(2): results = self.evaluate(random_access.at(dataset, index=i)) for component, result_component in zip(components, results): self.assertAllEqual(component[i], result_component) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=2)) @combinations.generate(test_base.default_test_combinations()) def testNested(self): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array([37.0, 38.0, 39.0, 40.0]) ] datasets = [ dataset_ops.Dataset.from_tensor_slices(component) for component in components ] dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2]))) for i in range(4): result1, (result2, result3) = self.evaluate(random_access.at(dataset, index=i)) self.assertAllEqual(components[0][i], result1) self.assertAllEqual(components[1][i], result2) self.assertAllEqual(components[2][i], result3) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=4)) @combinations.generate(test_base.default_test_combinations()) def testNamedTuple(self): Foo = collections.namedtuple("Foo", ["x", "y"]) x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6)) dataset = dataset_ops.Dataset.zip(x) expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)] for i in range(3): self.assertAllEqual( self.evaluate(random_access.at(dataset, index=i)), expected[i]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=4)) @combinations.generate(test_base.default_test_combinations()) def testAttrs(self): if attr is None: self.skipTest("attr module is not available.") @attr.s class Foo: x = attr.ib() y = attr.ib() x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6)) dataset = dataset_ops.Dataset.zip(x) expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)] for i in range(3): self.assertAllEqual( self.evaluate(random_access.at(dataset, index=i)), expected[i]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(random_access.at(dataset, index=4)) if __name__ == "__main__": test.main()
apache-2.0
michellemorales/OpenMM
models/differential_privacy/dp_sgd/dp_mnist/dp_mnist.py
15
21114
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Example differentially private trainer and evaluator for MNIST. """ from __future__ import division import json import os import sys import time import numpy as np import tensorflow as tf from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer from differential_privacy.dp_sgd.dp_optimizer import dp_pca from differential_privacy.dp_sgd.dp_optimizer import sanitizer from differential_privacy.dp_sgd.dp_optimizer import utils from differential_privacy.privacy_accountant.tf import accountant # parameters for the training tf.flags.DEFINE_integer("batch_size", 600, "The training batch size.") tf.flags.DEFINE_integer("batches_per_lot", 1, "Number of batches per lot.") # Together, batch_size and batches_per_lot determine lot_size. tf.flags.DEFINE_integer("num_training_steps", 50000, "The number of training steps." "This counts number of lots.") tf.flags.DEFINE_bool("randomize", True, "If true, randomize the input data; otherwise use a fixed " "seed and non-randomized input.") tf.flags.DEFINE_bool("freeze_bottom_layers", False, "If true, only train on the logit layer.") tf.flags.DEFINE_bool("save_mistakes", False, "If true, save the mistakes made during testing.") tf.flags.DEFINE_float("lr", 0.05, "start learning rate") tf.flags.DEFINE_float("end_lr", 0.05, "end learning rate") tf.flags.DEFINE_float("lr_saturate_epochs", 0, "learning rate saturate epochs; set to 0 for a constant " "learning rate of --lr.") # For searching parameters tf.flags.DEFINE_integer("projection_dimensions", 60, "PCA projection dimensions, or 0 for no projection.") tf.flags.DEFINE_integer("num_hidden_layers", 1, "Number of hidden layers in the network") tf.flags.DEFINE_integer("hidden_layer_num_units", 1000, "Number of units per hidden layer") tf.flags.DEFINE_float("default_gradient_l2norm_bound", 4.0, "norm clipping") tf.flags.DEFINE_integer("num_conv_layers", 0, "Number of convolutional layers to use.") tf.flags.DEFINE_string("training_data_path", "/tmp/mnist/mnist_train.tfrecord", "Location of the training data.") tf.flags.DEFINE_string("eval_data_path", "/tmp/mnist/mnist_test.tfrecord", "Location of the eval data.") tf.flags.DEFINE_integer("eval_steps", 10, "Evaluate the model every eval_steps") # Parameters for privacy spending. We allow linearly varying eps during # training. tf.flags.DEFINE_string("accountant_type", "Moments", "Moments, Amortized.") # Flags that control privacy spending during training. tf.flags.DEFINE_float("eps", 1.0, "Start privacy spending for one epoch of training, " "used if accountant_type is Amortized.") tf.flags.DEFINE_float("end_eps", 1.0, "End privacy spending for one epoch of training, " "used if accountant_type is Amortized.") tf.flags.DEFINE_float("eps_saturate_epochs", 0, "Stop varying epsilon after eps_saturate_epochs. Set to " "0 for constant eps of --eps. 
" "Used if accountant_type is Amortized.") tf.flags.DEFINE_float("delta", 1e-5, "Privacy spending for training. Constant through " "training, used if accountant_type is Amortized.") tf.flags.DEFINE_float("sigma", 4.0, "Noise sigma, used only if accountant_type is Moments") # Flags that control privacy spending for the pca projection # (only used if --projection_dimensions > 0). tf.flags.DEFINE_float("pca_eps", 0.5, "Privacy spending for PCA, used if accountant_type is " "Amortized.") tf.flags.DEFINE_float("pca_delta", 0.005, "Privacy spending for PCA, used if accountant_type is " "Amortized.") tf.flags.DEFINE_float("pca_sigma", 7.0, "Noise sigma for PCA, used if accountant_type is Moments") tf.flags.DEFINE_string("target_eps", "0.125,0.25,0.5,1,2,4,8", "Log the privacy loss for the target epsilon's. Only " "used when accountant_type is Moments.") tf.flags.DEFINE_float("target_delta", 1e-5, "Maximum delta for --terminate_based_on_privacy.") tf.flags.DEFINE_bool("terminate_based_on_privacy", False, "Stop training if privacy spent exceeds " "(max(--target_eps), --target_delta), even " "if --num_training_steps have not yet been completed.") tf.flags.DEFINE_string("save_path", "/tmp/mnist_dir", "Directory for saving model outputs.") FLAGS = tf.flags.FLAGS NUM_TRAINING_IMAGES = 60000 NUM_TESTING_IMAGES = 10000 IMAGE_SIZE = 28 def MnistInput(mnist_data_file, batch_size, randomize): """Create operations to read the MNIST input file. Args: mnist_data_file: Path of a file containing the MNIST images to process. batch_size: size of the mini batches to generate. randomize: If true, randomize the dataset. Returns: images: A tensor with the formatted image data. shape [batch_size, 28*28] labels: A tensor with the labels for each image. shape [batch_size] """ file_queue = tf.train.string_input_producer([mnist_data_file]) reader = tf.TFRecordReader() _, value = reader.read(file_queue) example = tf.parse_single_example( value, features={"image/encoded": tf.FixedLenFeature(shape=(), dtype=tf.string), "image/class/label": tf.FixedLenFeature([1], tf.int64)}) image = tf.cast(tf.image.decode_png(example["image/encoded"], channels=1), tf.float32) image = tf.reshape(image, [IMAGE_SIZE * IMAGE_SIZE]) image /= 255 label = tf.cast(example["image/class/label"], dtype=tf.int32) label = tf.reshape(label, []) if randomize: images, labels = tf.train.shuffle_batch( [image, label], batch_size=batch_size, capacity=(batch_size * 100), min_after_dequeue=(batch_size * 10)) else: images, labels = tf.train.batch([image, label], batch_size=batch_size) return images, labels def Eval(mnist_data_file, network_parameters, num_testing_images, randomize, load_path, save_mistakes=False): """Evaluate MNIST for a number of steps. Args: mnist_data_file: Path of a file containing the MNIST images to process. network_parameters: parameters for defining and training the network. num_testing_images: the number of images we will evaluate on. randomize: if false, randomize; otherwise, read the testing images sequentially. load_path: path where to load trained parameters from. save_mistakes: save the mistakes if True. Returns: The evaluation accuracy as a float. """ batch_size = 100 # Like for training, we need a session for executing the TensorFlow graph. with tf.Graph().as_default(), tf.Session() as sess: # Create the basic Mnist model. images, labels = MnistInput(mnist_data_file, batch_size, randomize) logits, _, _ = utils.BuildNetwork(images, network_parameters) softmax = tf.nn.softmax(logits) # Load the variables. 
ckpt_state = tf.train.get_checkpoint_state(load_path) if not (ckpt_state and ckpt_state.model_checkpoint_path): raise ValueError("No model checkpoint to eval at %s\n" % load_path) saver = tf.train.Saver() saver.restore(sess, ckpt_state.model_checkpoint_path) coord = tf.train.Coordinator() _ = tf.train.start_queue_runners(sess=sess, coord=coord) total_examples = 0 correct_predictions = 0 image_index = 0 mistakes = [] for _ in xrange((num_testing_images + batch_size - 1) // batch_size): predictions, label_values = sess.run([softmax, labels]) # Count how many were predicted correctly. for prediction, label_value in zip(predictions, label_values): total_examples += 1 if np.argmax(prediction) == label_value: correct_predictions += 1 elif save_mistakes: mistakes.append({"index": image_index, "label": label_value, "pred": np.argmax(prediction)}) image_index += 1 return (correct_predictions / total_examples, mistakes if save_mistakes else None) def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps, save_path, eval_steps=0): """Train MNIST for a number of steps. Args: mnist_train_file: path of MNIST train data file. mnist_test_file: path of MNIST test data file. network_parameters: parameters for defining and training the network. num_steps: number of steps to run. Here steps = lots save_path: path where to save trained parameters. eval_steps: evaluate the model every eval_steps. Returns: the result after the final training step. Raises: ValueError: if the accountant_type is not supported. """ batch_size = FLAGS.batch_size params = {"accountant_type": FLAGS.accountant_type, "task_id": 0, "batch_size": FLAGS.batch_size, "projection_dimensions": FLAGS.projection_dimensions, "default_gradient_l2norm_bound": network_parameters.default_gradient_l2norm_bound, "num_hidden_layers": FLAGS.num_hidden_layers, "hidden_layer_num_units": FLAGS.hidden_layer_num_units, "num_examples": NUM_TRAINING_IMAGES, "learning_rate": FLAGS.lr, "end_learning_rate": FLAGS.end_lr, "learning_rate_saturate_epochs": FLAGS.lr_saturate_epochs } # Log different privacy parameters dependent on the accountant type. if FLAGS.accountant_type == "Amortized": params.update({"flag_eps": FLAGS.eps, "flag_delta": FLAGS.delta, "flag_pca_eps": FLAGS.pca_eps, "flag_pca_delta": FLAGS.pca_delta, }) elif FLAGS.accountant_type == "Moments": params.update({"sigma": FLAGS.sigma, "pca_sigma": FLAGS.pca_sigma, }) with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'): # Create the basic Mnist model. images, labels = MnistInput(mnist_train_file, batch_size, FLAGS.randomize) logits, projection, training_params = utils.BuildNetwork( images, network_parameters) cost = tf.nn.softmax_cross_entropy_with_logits( logits=logits, labels=tf.one_hot(labels, 10)) # The actual cost is the average across the examples. cost = tf.reduce_sum(cost, [0]) / batch_size if FLAGS.accountant_type == "Amortized": priv_accountant = accountant.AmortizedAccountant(NUM_TRAINING_IMAGES) sigma = None pca_sigma = None with_privacy = FLAGS.eps > 0 elif FLAGS.accountant_type == "Moments": priv_accountant = accountant.GaussianMomentsAccountant( NUM_TRAINING_IMAGES) sigma = FLAGS.sigma pca_sigma = FLAGS.pca_sigma with_privacy = FLAGS.sigma > 0 else: raise ValueError("Undefined accountant type, needs to be " "Amortized or Moments, but got %s" % FLAGS.accountant) # Note: Here and below, we scale down the l2norm_bound by # batch_size. 
This is because per_example_gradients computes the # gradient of the minibatch loss with respect to each individual # example, and the minibatch loss (for our model) is the *average* # loss over examples in the minibatch. Hence, the scale of the # per-example gradients goes like 1 / batch_size. gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer( priv_accountant, [network_parameters.default_gradient_l2norm_bound / batch_size, True]) for var in training_params: if "gradient_l2norm_bound" in training_params[var]: l2bound = training_params[var]["gradient_l2norm_bound"] / batch_size gaussian_sanitizer.set_option(var, sanitizer.ClipOption(l2bound, True)) lr = tf.placeholder(tf.float32) eps = tf.placeholder(tf.float32) delta = tf.placeholder(tf.float32) init_ops = [] if network_parameters.projection_type == "PCA": with tf.variable_scope("pca"): # Compute differentially private PCA. all_data, _ = MnistInput(mnist_train_file, NUM_TRAINING_IMAGES, False) pca_projection = dp_pca.ComputeDPPrincipalProjection( all_data, network_parameters.projection_dimensions, gaussian_sanitizer, [FLAGS.pca_eps, FLAGS.pca_delta], pca_sigma) assign_pca_proj = tf.assign(projection, pca_projection) init_ops.append(assign_pca_proj) # Add global_step global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step") if with_privacy: gd_op = dp_optimizer.DPGradientDescentOptimizer( lr, [eps, delta], gaussian_sanitizer, sigma=sigma, batches_per_lot=FLAGS.batches_per_lot).minimize( cost, global_step=global_step) else: gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost) saver = tf.train.Saver() coord = tf.train.Coordinator() _ = tf.train.start_queue_runners(sess=sess, coord=coord) # We need to maintain the intialization sequence. for v in tf.trainable_variables(): sess.run(tf.variables_initializer([v])) sess.run(tf.global_variables_initializer()) sess.run(init_ops) results = [] start_time = time.time() prev_time = start_time filename = "results-0.json" log_path = os.path.join(save_path, filename) target_eps = [float(s) for s in FLAGS.target_eps.split(",")] if FLAGS.accountant_type == "Amortized": # Only matters if --terminate_based_on_privacy is true. target_eps = [max(target_eps)] max_target_eps = max(target_eps) lot_size = FLAGS.batches_per_lot * FLAGS.batch_size lots_per_epoch = NUM_TRAINING_IMAGES / lot_size for step in xrange(num_steps): epoch = step / lots_per_epoch curr_lr = utils.VaryRate(FLAGS.lr, FLAGS.end_lr, FLAGS.lr_saturate_epochs, epoch) curr_eps = utils.VaryRate(FLAGS.eps, FLAGS.end_eps, FLAGS.eps_saturate_epochs, epoch) for _ in xrange(FLAGS.batches_per_lot): _ = sess.run( [gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: FLAGS.delta}) sys.stderr.write("step: %d\n" % step) # See if we should stop training due to exceeded privacy budget: should_terminate = False terminate_spent_eps_delta = None if with_privacy and FLAGS.terminate_based_on_privacy: terminate_spent_eps_delta = priv_accountant.get_privacy_spent( sess, target_eps=[max_target_eps])[0] # For the Moments accountant, we should always have # spent_eps == max_target_eps. 
if (terminate_spent_eps_delta.spent_delta > FLAGS.target_delta or terminate_spent_eps_delta.spent_eps > max_target_eps): should_terminate = True if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate: if with_privacy: spent_eps_deltas = priv_accountant.get_privacy_spent( sess, target_eps=target_eps) else: spent_eps_deltas = [accountant.EpsDelta(0, 0)] for spent_eps, spent_delta in spent_eps_deltas: sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % ( spent_eps, spent_delta)) saver.save(sess, save_path=save_path + "/ckpt") train_accuracy, _ = Eval(mnist_train_file, network_parameters, num_testing_images=NUM_TESTING_IMAGES, randomize=True, load_path=save_path) sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy) test_accuracy, mistakes = Eval(mnist_test_file, network_parameters, num_testing_images=NUM_TESTING_IMAGES, randomize=False, load_path=save_path, save_mistakes=FLAGS.save_mistakes) sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy) curr_time = time.time() elapsed_time = curr_time - prev_time prev_time = curr_time results.append({"step": step+1, # Number of lots trained so far. "elapsed_secs": elapsed_time, "spent_eps_deltas": spent_eps_deltas, "train_accuracy": train_accuracy, "test_accuracy": test_accuracy, "mistakes": mistakes}) loginfo = {"elapsed_secs": curr_time-start_time, "spent_eps_deltas": spent_eps_deltas, "train_accuracy": train_accuracy, "test_accuracy": test_accuracy, "num_training_steps": step+1, # Steps so far. "mistakes": mistakes, "result_series": results} loginfo.update(params) if log_path: with tf.gfile.Open(log_path, "w") as f: json.dump(loginfo, f, indent=2) f.write("\n") f.close() if should_terminate: break def main(_): network_parameters = utils.NetworkParameters() # If the ASCII proto isn't specified, then construct a config protobuf based # on 3 flags. network_parameters.input_size = IMAGE_SIZE ** 2 network_parameters.default_gradient_l2norm_bound = ( FLAGS.default_gradient_l2norm_bound) if FLAGS.projection_dimensions > 0 and FLAGS.num_conv_layers > 0: raise ValueError("Currently you can't do PCA and have convolutions" "at the same time. Pick one") # could add support for PCA after convolutions. # Currently BuildNetwork can build the network with conv followed by # projection, but the PCA training works on data, rather than data run # through a few layers. Will need to init the convs before running the # PCA, and need to change the PCA subroutine to take a network and perhaps # allow for batched inputs, to handle larger datasets. if FLAGS.num_conv_layers > 0: conv = utils.ConvParameters() conv.name = "conv1" conv.in_channels = 1 conv.out_channels = 128 conv.num_outputs = 128 * 14 * 14 network_parameters.conv_parameters.append(conv) # defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2. # insize 28x28, bias, stddev 0.1, non-trainable. if FLAGS.num_conv_layers > 1: conv = network_parameters.ConvParameters() conv.name = "conv2" conv.in_channels = 128 conv.out_channels = 128 conv.num_outputs = 128 * 7 * 7 conv.in_size = 14 # defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2. # bias, stddev 0.1, non-trainable. network_parameters.conv_parameters.append(conv) if FLAGS.num_conv_layers > 2: raise ValueError("Currently --num_conv_layers must be 0,1 or 2." 
"Manually create a network_parameters proto for more.") if FLAGS.projection_dimensions > 0: network_parameters.projection_type = "PCA" network_parameters.projection_dimensions = FLAGS.projection_dimensions for i in xrange(FLAGS.num_hidden_layers): hidden = utils.LayerParameters() hidden.name = "hidden%d" % i hidden.num_units = FLAGS.hidden_layer_num_units hidden.relu = True hidden.with_bias = False hidden.trainable = not FLAGS.freeze_bottom_layers network_parameters.layer_parameters.append(hidden) logits = utils.LayerParameters() logits.name = "logits" logits.num_units = 10 logits.relu = False logits.with_bias = False network_parameters.layer_parameters.append(logits) Train(FLAGS.training_data_path, FLAGS.eval_data_path, network_parameters, FLAGS.num_training_steps, FLAGS.save_path, eval_steps=FLAGS.eval_steps) if __name__ == "__main__": tf.app.run()
gpl-2.0
tensorflow/tensorflow-experimental_link_static_libraries_once
tensorflow/python/debug/examples/v1/debug_mnist_v1.py
12
8076
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Demo of the tfdbg curses CLI: Locating the source of bad numerical values. The neural network in this demo is larged based on the tutorial at: tensorflow/examples/tutorials/mnist/mnist_with_summaries.py But modifications are made so that problematic numerical values (infs and nans) appear in nodes of the graph during training. """ import argparse import sys import tempfile import tensorflow from tensorflow.python import debug as tf_debug tf = tensorflow.compat.v1 IMAGE_SIZE = 28 HIDDEN_SIZE = 500 NUM_LABELS = 10 RAND_SEED = 42 FLAGS = None def parse_args(): """Parses commandline arguments. Returns: A tuple (parsed, unparsed) of the parsed object and a group of unparsed arguments that did not match the parser. """ parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--max_steps", type=int, default=10, help="Number of steps to run trainer.") parser.add_argument( "--train_batch_size", type=int, default=100, help="Batch size used during training.") parser.add_argument( "--learning_rate", type=float, default=0.025, help="Initial learning rate.") parser.add_argument( "--data_dir", type=str, default="/tmp/mnist_data", help="Directory for storing data") parser.add_argument( "--ui_type", type=str, default="curses", help="Command-line user interface type (curses | readline)") parser.add_argument( "--fake_data", type="bool", nargs="?", const=True, default=False, help="Use fake MNIST data for unit testing") parser.add_argument( "--debug", type="bool", nargs="?", const=True, default=False, help="Use debugger to track down bad values during training. " "Mutually exclusive with the --tensorboard_debug_address flag.") parser.add_argument( "--tensorboard_debug_address", type=str, default=None, help="Connect to the TensorBoard Debugger Plugin backend specified by " "the gRPC address (e.g., localhost:1234). 
Mutually exclusive with the " "--debug flag.") parser.add_argument( "--use_random_config_path", type="bool", nargs="?", const=True, default=False, help="""If set, set config file path to a random file in the temporary directory.""") return parser.parse_known_args() def main(_): # Import data if FLAGS.fake_data: imgs = tf.random.uniform(maxval=256, shape=(10, 28, 28), dtype=tf.int32) labels = tf.random.uniform(maxval=10, shape=(10,), dtype=tf.int32) mnist_train = imgs, labels mnist_test = imgs, labels else: mnist_train, mnist_test = tf.keras.datasets.mnist.load_data() def format_example(imgs, labels): imgs = tf.reshape(imgs, [-1, 28 * 28]) imgs = tf.cast(imgs, tf.float32) / 255.0 labels = tf.one_hot(labels, depth=10, dtype=tf.float32) return imgs, labels ds_train = tf.data.Dataset.from_tensor_slices(mnist_train) ds_train = ds_train.shuffle( 1000, seed=RAND_SEED).repeat().batch(FLAGS.train_batch_size) ds_train = ds_train.map(format_example) it_train = ds_train.make_initializable_iterator() ds_test = tf.data.Dataset.from_tensors(mnist_test).repeat() ds_test = ds_test.map(format_example) it_test = ds_test.make_initializable_iterator() sess = tf.InteractiveSession() # Create the MNIST neural network graph. # Input placeholders. with tf.name_scope("input"): handle = tf.placeholder(tf.string, shape=()) iterator = tf.data.Iterator.from_string_handle( handle, (tf.float32, tf.float32), ((None, IMAGE_SIZE * IMAGE_SIZE), (None, 10))) x, y_ = iterator.get_next() def weight_variable(shape): """Create a weight variable with appropriate initialization.""" initial = tf.truncated_normal(shape, stddev=0.1, seed=RAND_SEED) return tf.Variable(initial) def bias_variable(shape): """Create a bias variable with appropriate initialization.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): """Reusable code for making a simple neural net layer.""" # Adding a name scope ensures logical grouping of the layers in the graph. with tf.name_scope(layer_name): # This Variable will hold the state of the weights for the layer with tf.name_scope("weights"): weights = weight_variable([input_dim, output_dim]) with tf.name_scope("biases"): biases = bias_variable([output_dim]) with tf.name_scope("Wx_plus_b"): preactivate = tf.matmul(input_tensor, weights) + biases activations = act(preactivate) return activations hidden = nn_layer(x, IMAGE_SIZE**2, HIDDEN_SIZE, "hidden") logits = nn_layer(hidden, HIDDEN_SIZE, NUM_LABELS, "output", tf.identity) y = tf.nn.softmax(logits) with tf.name_scope("cross_entropy"): # The following line is the culprit of the bad numerical values that appear # during training of this graph. Log of zero gives inf, which is first seen # in the intermediate tensor "cross_entropy/Log:0" during the 4th run() # call. A multiplication of the inf values with zeros leads to nans, # which is first in "cross_entropy/mul:0". 
# # You can use the built-in, numerically-stable implementation to fix this # issue: # diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits) diff = -(y_ * tf.log(y)) with tf.name_scope("total"): cross_entropy = tf.reduce_mean(diff) with tf.name_scope("train"): train_step = tf.train.AdamOptimizer( FLAGS.learning_rate).minimize(cross_entropy) with tf.name_scope("accuracy"): with tf.name_scope("correct_prediction"): correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) with tf.name_scope("accuracy"): accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess.run(tf.global_variables_initializer()) sess.run(it_train.initializer) sess.run(it_test.initializer) train_handle = sess.run(it_train.string_handle()) test_handle = sess.run(it_test.string_handle()) if FLAGS.debug and FLAGS.tensorboard_debug_address: raise ValueError( "The --debug and --tensorboard_debug_address flags are mutually " "exclusive.") if FLAGS.debug: if FLAGS.use_random_config_path: _, config_file_path = tempfile.mkstemp(".tfdbg_config") else: config_file_path = None sess = tf_debug.LocalCLIDebugWrapperSession( sess, ui_type=FLAGS.ui_type, config_file_path=config_file_path) elif FLAGS.tensorboard_debug_address: sess = tf_debug.TensorBoardDebugWrapperSession( sess, FLAGS.tensorboard_debug_address) # Add this point, sess is a debug wrapper around the actual Session if # FLAGS.debug is true. In that case, calling run() will launch the CLI. for i in range(FLAGS.max_steps): acc = sess.run(accuracy, feed_dict={handle: test_handle}) print("Accuracy at step %d: %s" % (i, acc)) sess.run(train_step, feed_dict={handle: train_handle}) if __name__ == "__main__": FLAGS, unparsed = parse_args() with tf.Graph().as_default(): tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
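# Illustrative sketch (hypothetical, not part of the tfdbg demo): the comment
# above points at -(y_ * tf.log(y)) as the source of the bad values -- once the
# softmax saturates, log(0) = -inf and 0 * -inf = nan. A small NumPy
# illustration of the failure mode and of the numerically stable log-softmax.
import numpy as np

logits = np.array([400.0, 0.0, -400.0])
y = np.exp(logits) / np.exp(logits).sum()   # softmax; y[2] underflows to 0.0
labels = np.array([1.0, 0.0, 0.0])          # one-hot target

naive = -(labels * np.log(y))               # log(0) = -inf, then 0 * -inf = nan
print(naive)                                # the entry for the underflowed class is nan

shift = logits - logits.max()               # stable log-softmax via max shift
log_softmax = shift - np.log(np.exp(shift).sum())
stable = -(labels * log_softmax)
print(stable.sum())                         # finite (~0.0 for this example)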
apache-2.0
Ambrosys/climatelearn
climatelearn/learning/regression/pybrain_MP.py
1
10513
import numpy as np from copy import copy from copy import deepcopy import pandas as pd from pybrain.structure import FeedForwardNetwork from pybrain.structure import LinearLayer, SigmoidLayer from pybrain.structure import FullConnection from pybrain.datasets import SupervisedDataSet from pybrain.supervised.trainers import BackpropTrainer from climatelearn.learning.base_class import Regression from .. import errors class MP_Pybrain(Regression): """ Fully connected multilayer perceptron using pybrain library. """ def __init__(self, train_data, hyper, n_targets=None, label_targets=None): """ ------------ train_data: pandas DataFrame Contains columns for features and for target variables. The names of the target variables ends with the suffix "_tau" hyper: dictionary It contains the hyperparameters necessary to run all the functionalities of the model. They are the following: "structure" is a list of integers determining the number of neurons in each hidden layer "epochs" an integer specifying the maximum number of epochs to run during every training session "learning_rate" a float giving the learning rate of the gradient descend "momentum" a float giving the value of the momentum for the algorithm "batch" a bool. If True the method performs full batch learning, i.e. updates of the weights is done using all the instances of the training set. Else, normal online method is performed Other parameters regarding cross validation are explained in the base class """ Regression.__init__(self, train_data, hyper, n_targets=n_targets, label_targets=label_targets) self.N = FeedForwardNetwork() self.structure = [self.n_feature] + hyper['structure'] + [self.n_target] self._build_net(self.structure) self.res_params = [self.N.params[i] for i in range(len(self.N.params))] self.train_fraction = hyper['train_fraction'] self.seed = hyper['seed'] self.epochs = hyper['epochs'] self.learning_rate = hyper['learning_rate'] self.momentum = hyper['momentum'] self.batch = bool(hyper['batch']) def learn(self, train_data = None, seed = None): """ Performs single run training, and it is designed to be called after network instantiation. ---------- train_data: pandas Dataframe It needs to contain datetime objects on index, and both features and target variables. The target variables need to end with the suffix "_tau". If None the self.train_set variable passed at the moment of instantiation will be used. Returns: tuple(MP_Pybrain object,float) It returns the model with the lowest training error, and the value of the training error. """ if train_data is not None: self.train_set = train_data self.randomize() ds_train, ds_valid = self._build_dataset(self.train_set) trainer = BackpropTrainer(self.N, ds_train, learningrate=self.learning_rate, momentum=self.momentum,batchlearning=self.batch) trainer.train() e_train = [self._error(ds_train)] e_valid = [self._error(ds_valid)] final_model = copy(self) fin_error_train = e_train[0] fin_error_valid = e_valid[0] for i in range(1,self.epochs): if i%10 == 0: print "epoch: ", i trainer.train() e_train.append(self._error(ds_train)) e_valid.append(self._error(ds_valid)) if e_train[-1] < fin_error_train: final_model = deepcopy(self) fin_error_train = e_train[-1] fin_error_valid = e_valid[-1] return final_model, fin_error_train, fin_error_valid def xvalidate(self, train_data = None, folds = None): """ Performs n-folds cross-validation on the a data set. 
The method is designed to reset the network to an initial configuration (decided at the moment of instantiation) every time a new training is started. The purpose is to make model comparison and returning an average error given a specific data set and collection of hyper-parameters. At the moment training and validation sets are chosen based on the input sequence of data, i.e. there is no random shuffling of the instances of the data set. ---------- train_data: pandas Dataframe It needs to contain datetime objects on index, and both features and target variables. The target variables need to end with the suffix "_tau". If None the self.train_set variable passed at the moment of instantiation will be used. folds: integer The number of training/validation partition used in the method. If None it needs to be passed in the constructor when instantiating the object for the first time. If not passed ever, the method cannot work and an exception needs to be thrown. Returns: list, float, float A list of all the models trained for each fold, the mean train error and the cross-validation error, i.e. the average of NRMSE for all the training/validation partitions created. """ if train_data is not None: self.train_set = train_data if folds is not None: self.cv_folds = folds train, validation = self._build_folds(random=False) models = [] train_error = [] cv_error = [] for i in range(self.cv_folds): print "Cross-validation Fold: ", i+1 self.randomize() model, error, _ = self.learn(train_data=train[i]) models.append(deepcopy(model)) train_error.append(error) predicted, actual = self.test(validation[i]) e = 0 for k in predicted.keys(): e += errors.RMSE(np.array(actual[k]),np.array(predicted[k])) cv_error.append(e) return models, np.mean(train_error), np.mean(cv_error) def test(self, data): """ Tests the trained model on data. The usage is two fold: 1) Internal usage to calculate errors on validation sets. 2) For external usage when a test set is provided. Both the validation and test set need to contain target columns. For prediction, where target variables are unknown, please refer to the function self.predict below. ---------- data: pandas Dataframe A pandas dataframe. A deepcopy of it will be made and only the feature columns will be considered. Due to the functionality of the pyBrain library we require (at the moment) that the order of the colums is the same as the one of the training set used for training. Returns: pandas Dataframe A Dataframe with columns containing the predictions of the different target variables and same index as the input DataFrame """ data_x = data[self.features] data_y = data[self.targets] predicted = np.array([]) for i in range(len(data_x)): predicted = np.append(predicted, self.N.activate(data_x.values[i])) return pd.DataFrame(predicted, index=data.index, columns=self.targets), data_y def predict(self, data): """ It returns target variables given a set of features, using the model trained and saved. --------- data: pandas Dataframe It must contain all the feature columns used for training of the model Returns: pandas Dataframe It contains the prediction on the target variables. The name of the variables is the same as the one provided at the moment of instantiation of object. 
""" data_x = data[self.features] predicted = np.array([]) for i in range(len(data_x)): predicted = np.append(predicted, self.N.activate(data_x.values[i])) return pd.DataFrame(predicted, index=data_x.index, columns=self.targets) def randomize(self): self.N.randomize() pass ### Private functions ### def _error(self, ds): """ Calculates the RMSE over an input dataset, given the current state of the network. ds: Supervised dataset pybrain style Returns: float The total error between prediction and actual values. """ predicted = np.array([list(self.N.activate(x)) for x in ds['input']]).transpose() actual = np.array([list(x) for x in ds['target']]).transpose() total_error = [errors.RMSE(np.array(actual[i]),np.array(predicted[i])) for i in range(len(actual))] return sum(total_error) def _build_net(self,s): layers = [LinearLayer(s[0])] self.N.addInputModule(layers[0]) for i in range(1,len(s)-1): layers.append(SigmoidLayer(s[i])) self.N.addModule(layers[i]) layers.append(SigmoidLayer(s[-1])) self.N.addOutputModule(layers[-1]) self._build_connections(layers) def _build_connections(self, l): for i,j in zip(l,l[1:]): a = FullConnection(i,j) self.N.addConnection(a) self.N.sortModules() def _build_dataset(self, data): """ Given a input training Dataframe with features and targets it returns the formatted training and validation datasets for pybrain usage, and randomly shuffled according to the self.seed given at instantiation. ---------- data: pandas Dataframe It must contains both features and target columns Returns: (pybrain dataset, pybrain dataset) The first is the training dataset and the second is the validation dataset """ np.random.seed(self.seed) permutation = np.random.permutation(np.arange(len(data))) sep = int(self.train_fraction * len(data)) x = data[self.features] y = data[self.targets] ds_train = SupervisedDataSet(self.n_feature, self.n_target) ds_valid = SupervisedDataSet(self.n_feature, self.n_target) for i in permutation[:sep]: ds_train.addSample(x.values[i], y.values[i]) for i in permutation[sep:]: ds_valid.addSample(x.values[i], y.values[i]) return ds_train, ds_valid
gpl-2.0
tapomayukh/projects_in_python
classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/results/test10_cross_validate_categories_1200ms_scaled_method_v_area.py
1
4671
# Principal Component Analysis Code : from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud from pylab import * import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle from mvpa.clfs.knn import kNN from mvpa.datasets import Dataset from mvpa.clfs.transerror import TransferError from mvpa.misc.data_generators import normalFeatureDataset from mvpa.algorithms.cvtranserror import CrossValidatedTransferError from mvpa.datasets.splitters import NFoldSplitter import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled') from data_method_V import Fmat_original def pca(X): #get dimensions num_data,dim = X.shape #center data mean_X = X.mean(axis=1) M = (X-mean_X) # subtract the mean (along columns) Mcov = cov(M) ###### Sanity Check ###### i=0 n=0 while i < 41: j=0 while j < 140: if X[i,j] != X[i,j]: print X[i,j] print i,j n=n+1 j = j+1 i=i+1 print n ########################## print 'PCA - COV-Method used' val,vec = linalg.eig(Mcov) #return the projection matrix, the variance and the mean return vec,val,mean_X, M, Mcov if __name__ == '__main__': Fmat = Fmat_original[41:82,:] # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) print 'Total_Matrix_Shape:',m_tot,n_tot eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat) #print eigvec_total #print eigval_total #print mean_data_total m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total)) m_eigvec_total, n_eigvec_total = np.shape(eigvec_total) m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total)) print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used. 
perc_total = cumsum(eigval_total)/sum(eigval_total) # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure) W = eigvec_total[:,0:3] m_W, n_W = np.shape(W) print 'Reduced Dimension Eigenvector Shape:',m_W, n_W # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful) length = len(eigval_total) s = np.matrix(np.zeros(length)).T i = 0 while i < length: s[i] = sqrt(C[i,i]) i = i+1 Z = np.divide(B,s) m_Z, n_Z = np.shape(Z) print 'Z-Score Shape:', m_Z, n_Z #Projected Data: Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B' m_Y, n_Y = np.shape(Y.T) print 'Transposed Projected Data Shape:', m_Y, n_Y #Using PYMVPA PCA_data = np.array(Y.T) PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35 PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5 clf = kNN(k=3) terr = TransferError(clf) ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1) print ds1.samples.shape cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion']) error = cvterr(ds1) print error print cvterr.confusion.asstring(description=False) figure(1) cvterr.confusion.plot(numbers='True') #show() # Variances figure(2) title('Variances of PCs') stem(range(len(perc_total)),perc_total,'--b') axis([-0.3,30.3,0,1.2]) grid('True') show()
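# Illustrative sketch (hypothetical, synthetic data): the pca() helper above
# centres the data, takes the covariance and eigendecomposes it, and
# perc_total = cumsum(eigval)/sum(eigval) is the fraction of variance explained
# by the leading components. The same idea in a compact, self-contained NumPy
# form, with an explicit descending eigenvalue sort added for clarity.
import numpy as np

X = np.random.randn(41, 140)                  # features x observations, as above
M = X - X.mean(axis=1, keepdims=True)         # centre along observations
C = np.cov(M)                                 # 41 x 41 covariance
eigval, eigvec = np.linalg.eigh(C)            # eigh: symmetric input, real output
order = np.argsort(eigval)[::-1]
eigval, eigvec = eigval[order], eigvec[:, order]

explained = np.cumsum(eigval) / eigval.sum()  # analogue of perc_total
W = eigvec[:, :3]                             # keep the first 3 PCs, as above
Y = W.T @ M                                   # projected data, 3 x 140
print(explained[:3])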
mit
tapomayukh/projects_in_python
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/results/4_categories/test10_cross_validate_categories_1200ms_scaled_method_v.py
1
4665
# Principal Component Analysis Code : from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud from pylab import * import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle from mvpa.clfs.knn import kNN from mvpa.datasets import Dataset from mvpa.clfs.transerror import TransferError from mvpa.misc.data_generators import normalFeatureDataset from mvpa.algorithms.cvtranserror import CrossValidatedTransferError from mvpa.datasets.splitters import NFoldSplitter import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled') from data_method_V import Fmat_original def pca(X): #get dimensions num_data,dim = X.shape #center data mean_X = X.mean(axis=1) M = (X-mean_X) # subtract the mean (along columns) Mcov = cov(M) ###### Sanity Check ###### i=0 n=0 while i < 123: j=0 while j < 140: if X[i,j] != X[i,j]: print X[i,j] print i,j n=n+1 j = j+1 i=i+1 print n ########################## print 'PCA - COV-Method used' val,vec = linalg.eig(Mcov) #return the projection matrix, the variance and the mean return vec,val,mean_X, M, Mcov if __name__ == '__main__': Fmat = Fmat_original # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) print 'Total_Matrix_Shape:',m_tot,n_tot eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat) #print eigvec_total #print eigval_total #print mean_data_total m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total)) m_eigvec_total, n_eigvec_total = np.shape(eigvec_total) m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total)) print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used. 
perc_total = cumsum(eigval_total)/sum(eigval_total) # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure) W = eigvec_total[:,0:15] m_W, n_W = np.shape(W) print 'Reduced Dimension Eigenvector Shape:',m_W, n_W # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful) length = len(eigval_total) s = np.matrix(np.zeros(length)).T i = 0 while i < length: s[i] = sqrt(C[i,i]) i = i+1 Z = np.divide(B,s) m_Z, n_Z = np.shape(Z) print 'Z-Score Shape:', m_Z, n_Z #Projected Data: Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B' m_Y, n_Y = np.shape(Y.T) print 'Transposed Projected Data Shape:', m_Y, n_Y #Using PYMVPA PCA_data = np.array(Y.T) PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35 PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5 clf = kNN(k=5) terr = TransferError(clf) ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1) print ds1.samples.shape cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion']) error = cvterr(ds1) print error print cvterr.confusion.asstring(description=False) figure(1) cvterr.confusion.plot(numbers='True') #show() # Variances figure(2) title('Variances of PCs') stem(range(len(perc_total)),perc_total,'--b') axis([-0.3,30.3,0,1.2]) grid('True') show()
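# Illustrative sketch (hypothetical, not the author's pipeline): the script
# above uses PyMVPA's kNN with an NFoldSplitter over "chunks" (one object per
# chunk). A roughly equivalent modern scikit-learn version leaves one chunk
# (group) out per fold; the data shapes and group labels below are placeholders.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score

X = np.random.randn(140, 123)                      # observations x features
labels = np.repeat(["Rigid-Fixed", "Rigid-Movable",
                    "Soft-Fixed", "Soft-Movable"], 35)
groups = np.repeat(np.arange(28), 5)               # 28 objects, 5 trials each

X_proj = PCA(n_components=15).fit_transform(X)     # analogue of keeping 15 PCs
scores = cross_val_score(KNeighborsClassifier(n_neighbors=5),
                         X_proj, labels, groups=groups, cv=LeaveOneGroupOut())
print(scores.mean())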
mit
heli522/scikit-learn
examples/decomposition/plot_kernel_pca.py
350
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
DonBeo/scikit-learn
examples/decomposition/plot_kernel_pca.py
350
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
googlearchive/rgc-models
response_model/python/ASM/su_fit_nov/fit_nsem_3_datasets.py
1
7209
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Fit subunits with localized sparsity prior.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import os.path import pickle from absl import app from absl import flags import numpy as np import scipy.io as sio from tensorflow.python.platform import gfile from retina.response_model.python.ASM.su_fit_nov import su_model flags.DEFINE_string('src_dir', '/home/bhaishahster/NSEM_process/', 'temporary folder on machine for better I/O') flags.DEFINE_string('tmp_dir', '/home/bhaishahster/Downloads/' 'NSEM_process/NSEM_preprocess', 'temporary folder on machine for better I/O') flags.DEFINE_string('save_path', '/home/bhaishahster/' 'su_fits_nsem_3_datasets/', 'where to store results') flags.DEFINE_string('save_path_partial', '/home/bhaishahster/' 'su_fits_nsem_3_datasets_partial/', 'where to store results') flags.DEFINE_string('task_params_file', '/home/bhaishahster/tasks_nsem_3_datasets.txt', 'parameters of individual tasks') flags.DEFINE_integer('taskid', 0, 'Task ID') FLAGS = flags.FLAGS rng = np.random def main(argv): # read line corresponding to task with gfile.Open(FLAGS.task_params_file, 'r') as f: for _ in range(FLAGS.taskid + 1): line = f.readline() line = line[:-1] # Remove \n from end. print(line) # get task parameters by parsing the lines line_split = line.split(';') cell_idx = line_split[0] cell_idx = cell_idx[1:-1].split(',') cell_idx = int(cell_idx[0]) file_list = gfile.ListDirectory(FLAGS.src_dir) cell_file = file_list[cell_idx] print('Cell file %s' % cell_file) nsub = int(line_split[1]) projection_type = line_split[2] lam_proj = float(line_split[3]) # copy data dst = os.path.join(FLAGS.tmp_dir, cell_file) if not gfile.Exists(dst): print('Started Copy') src = os.path.join(FLAGS.src_dir, cell_file) if not gfile.IsDirectory(FLAGS.tmp_dir): gfile.MkDir(FLAGS.tmp_dir) gfile.Copy(src, dst) print('File copied to destination') else: print('File exists') # load stimulus, response data try: data = sio.loadmat(dst) trainMov_filterNSEM = data['trainMov_filterNSEM'] testMov_filterNSEM = data['testMov_filterNSEM'] trainSpksNSEM = data['trainSpksNSEM'] testSpksNSEM = data['testSpksNSEM'] mask = data['mask'] neighbor_mat = su_model.get_neighbormat(mask, nbd=1) trainMov_filterWN = data['trainMov_filterWN'] testMov_filterWN = data['testMov_filterWN'] trainSpksWN = data['trainSpksWN'] testSpksWN = data['testSpksWN'] # get NSEM stimulus and resposne stimulus_WN = np.array(trainMov_filterWN.transpose(), dtype='float32') response_WN = np.array(np.squeeze(trainSpksWN), dtype='float32') stimulus_NSEM = np.array(trainMov_filterNSEM.transpose(), dtype='float32') response_NSEM = np.array(np.squeeze(trainSpksNSEM), dtype='float32') print('Prepared data') # Do fitting # set random seed. 
np.random.seed(23) print('Made partitions') # Do fitting # WN data ifrac = 0.8 tms_train_WN = np.arange(0, np.floor(stimulus_WN.shape[0] * ifrac)).astype(np.int) tms_test_WN = np.arange(np.floor(stimulus_WN.shape[0] * ifrac), 1 * np.floor(stimulus_WN.shape[0] * 1)).astype(np.int) # NSEM data ifrac = 0.8 tms_train_NSEM = np.arange(0, np.floor(stimulus_NSEM.shape[0] * ifrac)).astype(np.int) tms_test_NSEM = np.arange(np.floor(stimulus_NSEM.shape[0] * ifrac), 1 * np.floor(stimulus_NSEM.shape[0] * 1)).astype(np.int) # Give filename ss = str(cell_idx) save_filename = os.path.join(FLAGS.save_path, 'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' % (ss, nsub, projection_type, lam_proj)) save_filename_partial = os.path.join(FLAGS.save_path_partial, 'Cell_%s_nsub_%d_%s_%.3f_jnt.pkl' % (ss, nsub, projection_type, lam_proj)) ## Do fitting if not gfile.Exists(save_filename): # Fit SU on WN print('Fitting started on WN') op = su_model.Flat_clustering_jnt(stimulus_WN, np.expand_dims(response_WN, 1), nsub, tms_train_WN, tms_test_WN, steps_max=10000, eps=1e-9, projection_type=projection_type, neighbor_mat=neighbor_mat, lam_proj=lam_proj, eps_proj=0.01, save_filename_partial= save_filename_partial, fitting_phases=[1]) _, _, alpha, lam_log_wn, lam_log_test_wn, fitting_phase, fit_params_wn = op print('WN fit done') # Fit on NSEM op = su_model.fit_scales(stimulus_NSEM[tms_train_NSEM, :], np.expand_dims(response_NSEM[tms_train_NSEM], 1), stimulus_NSEM[tms_test_NSEM, :], np.expand_dims(response_NSEM[tms_test_NSEM], 1), Ns=nsub, K=fit_params_wn[0][0], b=fit_params_wn[0][1], params=fit_params_wn[0][2], lr=0.01, eps=1e-9) k_nsem, b_nsem, nl_params_nsem, lam_log_nsem, lam_log_test_nsem = op # Collect results and save fit_params = fit_params_wn + [[k_nsem, b_nsem, nl_params_nsem]] lam_log = [lam_log_wn, np.array(lam_log_nsem)] lam_log_test = [lam_log_test_wn, np.array(lam_log_test_nsem)] save_dict = {'lam_log': lam_log, 'lam_log_test': lam_log_test, 'fit_params': fit_params, 'mask': mask} pickle.dump(save_dict, gfile.Open(save_filename, 'w')) print('Saved results') except: print('Error') if __name__ == '__main__': app.run(main)
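# Illustrative sketch (hypothetical, not from the tasks file): main() reads one
# semicolon-separated line per task of the form
#   "[cell_idx,...];nsub;projection_type;lam_proj"
# A minimal standalone illustration of that parsing; the example line and the
# "nnls" projection name are made up for demonstration only.
line = "[12,0];6;nnls;0.100"

cell_field, nsub_field, projection_type, lam_field = line.split(';')
cell_idx = int(cell_field[1:-1].split(',')[0])   # strip "[...]", keep first entry
nsub = int(nsub_field)
lam_proj = float(lam_field)
print(cell_idx, nsub, projection_type, lam_proj)  # 12 6 nnls 0.1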
apache-2.0
DonBeo/scikit-learn
sklearn/manifold/tests/test_spectral_embedding.py
215
8091
from nose.tools import assert_true from nose.tools import assert_equal from scipy.sparse import csr_matrix from scipy.sparse import csc_matrix import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_raises from nose.plugins.skip import SkipTest from sklearn.manifold.spectral_embedding_ import SpectralEmbedding from sklearn.manifold.spectral_embedding_ import _graph_is_connected from sklearn.manifold import spectral_embedding from sklearn.metrics.pairwise import rbf_kernel from sklearn.metrics import normalized_mutual_info_score from sklearn.cluster import KMeans from sklearn.datasets.samples_generator import make_blobs # non centered, sparse centers to check the centers = np.array([ [0.0, 5.0, 0.0, 0.0, 0.0], [0.0, 0.0, 4.0, 0.0, 0.0], [1.0, 0.0, 0.0, 5.0, 1.0], ]) n_samples = 1000 n_clusters, n_features = centers.shape S, true_labels = make_blobs(n_samples=n_samples, centers=centers, cluster_std=1., random_state=42) def _check_with_col_sign_flipping(A, B, tol=0.0): """ Check array A and B are equal with possible sign flipping on each columns""" sign = True for column_idx in range(A.shape[1]): sign = sign and ((((A[:, column_idx] - B[:, column_idx]) ** 2).mean() <= tol ** 2) or (((A[:, column_idx] + B[:, column_idx]) ** 2).mean() <= tol ** 2)) if not sign: return False return True def test_spectral_embedding_two_components(seed=36): # Test spectral embedding with two components random_state = np.random.RandomState(seed) n_sample = 100 affinity = np.zeros(shape=[n_sample * 2, n_sample * 2]) # first component affinity[0:n_sample, 0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2 # second component affinity[n_sample::, n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2 # connection affinity[0, n_sample + 1] = 1 affinity[n_sample + 1, 0] = 1 affinity.flat[::2 * n_sample + 1] = 0 affinity = 0.5 * (affinity + affinity.T) true_label = np.zeros(shape=2 * n_sample) true_label[0:n_sample] = 1 se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed", random_state=np.random.RandomState(seed)) embedded_coordinate = se_precomp.fit_transform(affinity) # Some numpy versions are touchy with types embedded_coordinate = \ se_precomp.fit_transform(affinity.astype(np.float32)) # thresholding on the first components using 0. 
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float") assert_equal(normalized_mutual_info_score(true_label, label_), 1.0) def test_spectral_embedding_precomputed_affinity(seed=36): # Test spectral embedding with precomputed kernel gamma = 1.0 se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed", random_state=np.random.RandomState(seed)) se_rbf = SpectralEmbedding(n_components=2, affinity="rbf", gamma=gamma, random_state=np.random.RandomState(seed)) embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma)) embed_rbf = se_rbf.fit_transform(S) assert_array_almost_equal( se_precomp.affinity_matrix_, se_rbf.affinity_matrix_) assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05)) def test_spectral_embedding_callable_affinity(seed=36): # Test spectral embedding with callable affinity gamma = 0.9 kern = rbf_kernel(S, gamma=gamma) se_callable = SpectralEmbedding(n_components=2, affinity=( lambda x: rbf_kernel(x, gamma=gamma)), gamma=gamma, random_state=np.random.RandomState(seed)) se_rbf = SpectralEmbedding(n_components=2, affinity="rbf", gamma=gamma, random_state=np.random.RandomState(seed)) embed_rbf = se_rbf.fit_transform(S) embed_callable = se_callable.fit_transform(S) assert_array_almost_equal( se_callable.affinity_matrix_, se_rbf.affinity_matrix_) assert_array_almost_equal(kern, se_rbf.affinity_matrix_) assert_true( _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05)) def test_spectral_embedding_amg_solver(seed=36): # Test spectral embedding with amg solver try: from pyamg import smoothed_aggregation_solver except ImportError: raise SkipTest("pyamg not available.") se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors", eigen_solver="amg", n_neighbors=5, random_state=np.random.RandomState(seed)) se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors", eigen_solver="arpack", n_neighbors=5, random_state=np.random.RandomState(seed)) embed_amg = se_amg.fit_transform(S) embed_arpack = se_arpack.fit_transform(S) assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05)) def test_pipeline_spectral_clustering(seed=36): # Test using pipeline to do spectral clustering random_state = np.random.RandomState(seed) se_rbf = SpectralEmbedding(n_components=n_clusters, affinity="rbf", random_state=random_state) se_knn = SpectralEmbedding(n_components=n_clusters, affinity="nearest_neighbors", n_neighbors=5, random_state=random_state) for se in [se_rbf, se_knn]: km = KMeans(n_clusters=n_clusters, random_state=random_state) km.fit(se.fit_transform(S)) assert_array_almost_equal( normalized_mutual_info_score( km.labels_, true_labels), 1.0, 2) def test_spectral_embedding_unknown_eigensolver(seed=36): # Test that SpectralClustering fails with an unknown eigensolver se = SpectralEmbedding(n_components=1, affinity="precomputed", random_state=np.random.RandomState(seed), eigen_solver="<unknown>") assert_raises(ValueError, se.fit, S) def test_spectral_embedding_unknown_affinity(seed=36): # Test that SpectralClustering fails with an unknown affinity type se = SpectralEmbedding(n_components=1, affinity="<unknown>", random_state=np.random.RandomState(seed)) assert_raises(ValueError, se.fit, S) def test_connectivity(seed=36): # Test that graph connectivity test works as expected graph = np.array([[1, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [0, 0, 0, 1, 1]]) assert_equal(_graph_is_connected(graph), False) assert_equal(_graph_is_connected(csr_matrix(graph)), False) 
assert_equal(_graph_is_connected(csc_matrix(graph)), False) graph = np.array([[1, 1, 0, 0, 0], [1, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [0, 0, 0, 1, 1]]) assert_equal(_graph_is_connected(graph), True) assert_equal(_graph_is_connected(csr_matrix(graph)), True) assert_equal(_graph_is_connected(csc_matrix(graph)), True) def test_spectral_embedding_deterministic(): # Test that Spectral Embedding is deterministic random_state = np.random.RandomState(36) data = random_state.randn(10, 30) sims = rbf_kernel(data) embedding_1 = spectral_embedding(sims) embedding_2 = spectral_embedding(sims) assert_array_almost_equal(embedding_1, embedding_2)
bsd-3-clause
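The tests above exercise SpectralEmbedding end to end. As a hedged usage sketch on the same blob fixture, assuming a recent scikit-learn where the estimator is importable from the public sklearn.manifold path (the tests target an older release with private spectral_embedding_ modules):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.manifold import SpectralEmbedding

# Same non-centered, sparse centers as the test fixture above.
centers = np.array([[0.0, 5.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 4.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0, 5.0, 1.0]])
S, true_labels = make_blobs(n_samples=1000, centers=centers,
                            cluster_std=1.0, random_state=42)

# Embed with an RBF affinity; the embedding columns are only defined up to a
# sign flip, which is what _check_with_col_sign_flipping accounts for above.
embedder = SpectralEmbedding(n_components=2, affinity="rbf", gamma=1.0,
                             random_state=0)
embedding = embedder.fit_transform(S)
print(embedding.shape)  # (1000, 2)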
KellyChan/Python
python/sklearn/examples/general/roc_with_cross_validation.py
3
2293
#---------------------------------------------------------------# # Project: Receiver operating characteristic (ROC) with cross validation # Author: Kelly Chan # Date: Apr 23 2014 #---------------------------------------------------------------# print(__doc__) import numpy as np import pylab as pl from scipy import interp from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.cross_validation import StratifiedKFold def loadData(): iris = datasets.load_iris() X = iris.data y = iris.target X, y = X[y != 2], y[y != 2] n_samples, n_features = X.shape return X, y, n_samples, n_features def addNoise(X, n_samples, n_features): X = np.c_[X, np.random.randn(n_samples, 200 * n_features)] return X def crossValidation(y): cv = StratifiedKFold(y, n_folds=6) return cv def createSVM(): classifier = svm.SVC(kernel='linear', probability=True, random_state=0) return classifier def plotROC(classifier, cv, X, y): mean_tpr = 0.0 mean_fpr = np.linspace(0, 1, 100) all_tpr = [] for i, (train, test) in enumerate(cv): probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test]) fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1]) mean_tpr += interp(mean_fpr, fpr, tpr) mean_tpr[0] = 0.0 roc_auc = auc(fpr, tpr) pl.plot(fpr, tpr, lw=1, \ label='ROC fold %d (area = %.2f)' % (i, roc_auc)) pl.plot([0, 1], [0, 1], '--', \ color=(0.6, 0.6, 0.6), \ label='Luck') mean_tpr /= len(cv) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) pl.plot(mean_fpr, mean_tpr, 'k--', \ label='Mean ROC (area = %.2f)' % mean_auc, lw=2) pl.xlim([-0.05, 1.05]) pl.ylim([-0.05, 1.05]) pl.xlabel('False Positive Rate') pl.ylabel('True Positive Rate') pl.title('Receiver operating characteristic example') pl.legend(loc='lower right') pl.show() def test(): X, y, n_samples, n_features = loadData() X = addNoise(X, n_samples, n_features) cv = crossValidation(y) classifier = createSVM() plotROC(classifier, cv, X, y) if __name__ == '__main__': test()
mit
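The script above uses the long-deprecated sklearn.cross_validation module and pylab. A hedged sketch of the same per-fold ROC/AUC computation with current scikit-learn imports (StratifiedKFold from sklearn.model_selection used as a splitter object), with the plotting left out:

from sklearn import datasets, svm
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import StratifiedKFold

# Binary subset of iris, as in the example above.
X, y = datasets.load_iris(return_X_y=True)
X, y = X[y != 2], y[y != 2]

cv = StratifiedKFold(n_splits=6)
clf = svm.SVC(kernel='linear', probability=True, random_state=0)

for fold, (train, test) in enumerate(cv.split(X, y)):
    probas = clf.fit(X[train], y[train]).predict_proba(X[test])
    fpr, tpr, _ = roc_curve(y[test], probas[:, 1])
    print('ROC fold %d (area = %.2f)' % (fold, auc(fpr, tpr)))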
arabenjamin/scikit-learn
sklearn/utils/tests/test_extmath.py
129
16270
# Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis Engemann <d.engemann@fz-juelich.de> # # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import linalg from scipy import stats from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.extmath import density from sklearn.utils.extmath import logsumexp from sklearn.utils.extmath import norm, squared_norm from sklearn.utils.extmath import randomized_svd from sklearn.utils.extmath import row_norms from sklearn.utils.extmath import weighted_mode from sklearn.utils.extmath import cartesian from sklearn.utils.extmath import log_logistic from sklearn.utils.extmath import fast_dot, _fast_dot from sklearn.utils.extmath import svd_flip from sklearn.utils.extmath import _batch_mean_variance_update from sklearn.utils.extmath import _deterministic_vector_sign_flip from sklearn.datasets.samples_generator import make_low_rank_matrix def test_density(): rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 5)) X[1, 2] = 0 X[5, 3] = 0 X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_coo = sparse.coo_matrix(X) X_lil = sparse.lil_matrix(X) for X_ in (X_csr, X_csc, X_coo, X_lil): assert_equal(density(X_), density(X)) def test_uniform_weights(): # with uniform weights, results should be identical to stats.mode rng = np.random.RandomState(0) x = rng.randint(10, size=(10, 5)) weights = np.ones(x.shape) for axis in (None, 0, 1): mode, score = stats.mode(x, axis) mode2, score2 = weighted_mode(x, weights, axis) assert_true(np.all(mode == mode2)) assert_true(np.all(score == score2)) def test_random_weights(): # set this up so that each row should have a weighted mode of 6, # with a score that is easily reproduced mode_result = 6 rng = np.random.RandomState(0) x = rng.randint(mode_result, size=(100, 10)) w = rng.random_sample(x.shape) x[:, :5] = mode_result w[:, :5] += 1 mode, score = weighted_mode(x, w, axis=1) assert_array_equal(mode, mode_result) assert_array_almost_equal(score.ravel(), w[:, :5].sum(1)) def test_logsumexp(): # Try to add some smallish numbers in logspace x = np.array([1e-40] * 1000000) logx = np.log(x) assert_almost_equal(np.exp(logsumexp(logx)), x.sum()) X = np.vstack([x, x]) logX = np.vstack([logx, logx]) assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) def test_randomized_svd_low_rank(): # Check that extmath.randomized_svd is consistent with linalg.svd n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X of approximate effective rank `rank` and no noise # component (very structured signal): X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method U, s, V = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method Ua, sa, Va = randomized_svd(X, k) assert_equal(Ua.shape, (n_samples, k)) assert_equal(sa.shape, (k,)) assert_equal(Va.shape, (k, n_features)) # ensure that the singular values of both methods 
are equal up to the real # rank of the matrix assert_almost_equal(s[:k], sa) # check the singular vectors too (while not checking the sign) assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va)) # check the sparse matrix representation X = sparse.csr_matrix(X) # compute the singular values of X using the fast approximate method Ua, sa, Va = randomized_svd(X, k) assert_almost_equal(s[:rank], sa[:rank]) def test_norm_squared_norm(): X = np.random.RandomState(42).randn(50, 63) X *= 100 # check stability X += 200 assert_almost_equal(np.linalg.norm(X.ravel()), norm(X)) assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6) assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6) def test_row_norms(): X = np.random.RandomState(42).randn(100, 100) sq_norm = (X ** 2).sum(axis=1) assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X)) Xcsr = sparse.csr_matrix(X, dtype=np.float32) assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5) assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr)) def test_randomized_svd_low_rank_with_noise(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X wity structure approximate rank `rank` and an # important noisy component X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.5, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method # without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.05) # compute the singular values of X using the fast approximate method with # iterated power method _, sap, _ = randomized_svd(X, k, n_iter=5) # the iterated power method is helping getting rid of the noise: assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_infinite_rank(): # Check that extmath.randomized_svd can handle noisy matrices n_samples = 100 n_features = 500 rank = 5 k = 10 # let us try again without 'low_rank component': just regularly but slowly # decreasing singular values: the rank of the data matrix is infinite X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=1.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method _, s, _ = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method # without the iterated power method _, sa, _ = randomized_svd(X, k, n_iter=0) # the approximation does not tolerate the noise: assert_greater(np.abs(s[:k] - sa).max(), 0.1) # compute the singular values of X using the fast approximate method with # iterated power method _, sap, _ = randomized_svd(X, k, n_iter=5) # the iterated power method is still managing to get most of the structure # at the requested rank assert_almost_equal(s[:k], sap, decimal=3) def test_randomized_svd_transpose_consistency(): # Check that transposing the design matrix has limit impact n_samples = 100 n_features = 500 rank = 4 k = 10 X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.5, random_state=0) assert_equal(X.shape, 
(n_samples, n_features)) U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0) U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0) U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto', random_state=0) U4, s4, V4 = linalg.svd(X, full_matrices=False) assert_almost_equal(s1, s4[:k], decimal=3) assert_almost_equal(s2, s4[:k], decimal=3) assert_almost_equal(s3, s4[:k], decimal=3) assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]), decimal=2) assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]), decimal=2) # in this case 'auto' is equivalent to transpose assert_almost_equal(s2, s3) def test_svd_flip(): # Check that svd_flip works in both situations, and reconstructs input. rs = np.random.RandomState(1999) n_samples = 20 n_features = 10 X = rs.randn(n_samples, n_features) # Check matrix reconstruction U, S, V = linalg.svd(X, full_matrices=False) U1, V1 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6) # Check transposed matrix reconstruction XT = X.T U, S, V = linalg.svd(XT, full_matrices=False) U2, V2 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6) # Check that different flip methods are equivalent under reconstruction U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6) U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6) def test_randomized_svd_sign_flip(): a = np.array([[2.0, 0.0], [0.0, 1.0]]) u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41) for seed in range(10): u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed) assert_almost_equal(u1, u2) assert_almost_equal(v1, v2) assert_almost_equal(np.dot(u2 * s2, v2), a) assert_almost_equal(np.dot(u2.T, u2), np.eye(2)) assert_almost_equal(np.dot(v2.T, v2), np.eye(2)) def test_cartesian(): # Check if cartesian product delivers the right results axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7])) true_out = np.array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) out = cartesian(axes) assert_array_equal(true_out, out) # check single axis x = np.arange(3) assert_array_equal(x[:, np.newaxis], cartesian((x,))) def test_logistic_sigmoid(): # Check correctness and robustness of logistic sigmoid implementation naive_logistic = lambda x: 1 / (1 + np.exp(-x)) naive_log_logistic = lambda x: np.log(naive_logistic(x)) x = np.linspace(-2, 2, 50) assert_array_almost_equal(log_logistic(x), naive_log_logistic(x)) extreme_x = np.array([-100., 100.]) assert_array_almost_equal(log_logistic(extreme_x), [-100, 0]) def test_fast_dot(): # Check fast dot blas wrapper function if fast_dot is np.dot: return rng = np.random.RandomState(42) A = rng.random_sample([2, 10]) B = rng.random_sample([2, 10]) try: linalg.get_blas_funcs(['gemm'])[0] has_blas = True except (AttributeError, ValueError): has_blas = False if has_blas: # Test _fast_dot for invalid input. # Maltyped data. for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]: assert_raises(ValueError, _fast_dot, A.astype(dt1), B.astype(dt2).T) # Malformed data. 
## ndim == 0 E = np.empty(0) assert_raises(ValueError, _fast_dot, E, E) ## ndim == 1 assert_raises(ValueError, _fast_dot, A, A[0]) ## ndim > 2 assert_raises(ValueError, _fast_dot, A.T, np.array([A, A])) ## min(shape) == 1 assert_raises(ValueError, _fast_dot, A, A[0, :][None, :]) # test for matrix mismatch error assert_raises(ValueError, _fast_dot, A, A) # Test cov-like use case + dtypes. for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) # col < row C = np.dot(A.T, A) C_ = fast_dot(A.T, A) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A, B.T) C_ = fast_dot(A, B.T) assert_almost_equal(C, C_, decimal=5) # Test square matrix * rectangular use case. A = rng.random_sample([2, 2]) for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) C = np.dot(A, B) C_ = fast_dot(A, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) if has_blas: for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]: assert_raises(ValueError, _fast_dot, x, x.T) def test_incremental_variance_update_formulas(): # Test Youngs and Cramer incremental variance formulas. # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html A = np.array([[600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300]]).T idx = 2 X1 = A[:idx, :] X2 = A[idx:, :] old_means = X1.mean(axis=0) old_variances = X1.var(axis=0) old_sample_count = X1.shape[0] final_means, final_variances, final_count = _batch_mean_variance_update( X2, old_means, old_variances, old_sample_count) assert_almost_equal(final_means, A.mean(axis=0), 6) assert_almost_equal(final_variances, A.var(axis=0), 6) assert_almost_equal(final_count, A.shape[0]) def test_incremental_variance_ddof(): # Test that degrees of freedom parameter for calculations are correct. rng = np.random.RandomState(1999) X = rng.randn(50, 10) n_samples, n_features = X.shape for batch_size in [11, 20, 37]: steps = np.arange(0, X.shape[0], batch_size) if steps[-1] != X.shape[0]: steps = np.hstack([steps, n_samples]) for i, j in zip(steps[:-1], steps[1:]): batch = X[i:j, :] if i == 0: incremental_means = batch.mean(axis=0) incremental_variances = batch.var(axis=0) # Assign this twice so that the test logic is consistent incremental_count = batch.shape[0] sample_count = batch.shape[0] else: result = _batch_mean_variance_update( batch, incremental_means, incremental_variances, sample_count) (incremental_means, incremental_variances, incremental_count) = result sample_count += batch.shape[0] calculated_means = np.mean(X[:j], axis=0) calculated_variances = np.var(X[:j], axis=0) assert_almost_equal(incremental_means, calculated_means, 6) assert_almost_equal(incremental_variances, calculated_variances, 6) assert_equal(incremental_count, sample_count) def test_vector_sign_flip(): # Testing that sign flip is working & largest value has positive sign data = np.random.RandomState(36).randn(5, 5) max_abs_rows = np.argmax(np.abs(data), axis=1) data_flipped = _deterministic_vector_sign_flip(data) max_rows = np.argmax(data_flipped, axis=1) assert_array_equal(max_abs_rows, max_rows) signs = np.sign(data[range(data.shape[0]), max_abs_rows]) assert_array_equal(data, data_flipped * signs[:, np.newaxis])
bsd-3-clause
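As a quick illustration of the randomized_svd behaviour checked above, the hedged sketch below compares its leading singular values against an exact SVD on a noiseless low-rank matrix; the import paths assume a recent scikit-learn where make_low_rank_matrix is exposed from sklearn.datasets.

import numpy as np
from scipy import linalg
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd

# Structured matrix with effective rank 5 and no noisy tail.
X = make_low_rank_matrix(n_samples=100, n_features=500, effective_rank=5,
                         tail_strength=0.0, random_state=0)

_, s_exact, _ = linalg.svd(X, full_matrices=False)
_, s_approx, _ = randomized_svd(X, n_components=10, random_state=0)

# The approximation should track the leading exact singular values closely.
print(np.max(np.abs(s_exact[:10] - s_approx)))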
heli522/scikit-learn
sklearn/tests/test_qda.py
154
3481
import numpy as np from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn import qda # Data is just 6 separable points in the plane X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2], [1, 3], [1, 2], [2, 1], [2, 2]]) y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2]) y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1]) # Degenerate data with 1 feature (still should be separable) X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ], [2, ], [3, ]]) # Data that has zero variance in one dimension and needs regularization X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0], [2, 0], [3, 0]]) # One element class y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2]) # Data with less samples in a class than n_features X5 = np.c_[np.arange(8), np.zeros((8,3))] y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1]) def test_qda(): # QDA classification. # This checks that QDA implements fit and predict and returns # correct values for a simple toy dataset. clf = qda.QDA() y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y) # Assure that it works with 1D data y_pred1 = clf.fit(X1, y).predict(X1) assert_array_equal(y_pred1, y) # Test probas estimates y_proba_pred1 = clf.predict_proba(X1) assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y) y_log_proba_pred1 = clf.predict_log_proba(X1) assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8) y_pred3 = clf.fit(X, y3).predict(X) # QDA shouldn't be able to separate those assert_true(np.any(y_pred3 != y3)) # Classes should have at least 2 elements assert_raises(ValueError, clf.fit, X, y4) def test_qda_priors(): clf = qda.QDA() y_pred = clf.fit(X, y).predict(X) n_pos = np.sum(y_pred == 2) neg = 1e-10 clf = qda.QDA(priors=np.array([neg, 1 - neg])) y_pred = clf.fit(X, y).predict(X) n_pos2 = np.sum(y_pred == 2) assert_greater(n_pos2, n_pos) def test_qda_store_covariances(): # The default is to not set the covariances_ attribute clf = qda.QDA().fit(X, y) assert_true(not hasattr(clf, 'covariances_')) # Test the actual attribute: clf = qda.QDA().fit(X, y, store_covariances=True) assert_true(hasattr(clf, 'covariances_')) assert_array_almost_equal( clf.covariances_[0], np.array([[0.7, 0.45], [0.45, 0.7]]) ) assert_array_almost_equal( clf.covariances_[1], np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]]) ) def test_qda_regularization(): # the default is reg_param=0. and will cause issues # when there is a constant variable clf = qda.QDA() with ignore_warnings(): y_pred = clf.fit(X2, y).predict(X2) assert_true(np.any(y_pred != y)) # adding a little regularization fixes the problem clf = qda.QDA(reg_param=0.01) with ignore_warnings(): clf.fit(X2, y) y_pred = clf.predict(X2) assert_array_equal(y_pred, y) # Case n_samples_in_a_class < n_features clf = qda.QDA(reg_param=0.1) with ignore_warnings(): clf.fit(X5, y5) y_pred5 = clf.predict(X5) assert_array_equal(y_pred5, y5)
bsd-3-clause
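The sklearn.qda module exercised above was later folded into sklearn.discriminant_analysis. A minimal hedged sketch of the same toy problem with the current class name:

import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

# Same 2-class toy data as in the tests above.
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])

clf = QuadraticDiscriminantAnalysis()
print(clf.fit(X, y).predict(X))   # recovers y on this separable toy set
print(clf.predict_proba(X[:2]))   # per-class posterior probabilities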
CodingCat/mxnet
example/stochastic-depth/sd_mnist.py
44
4374
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. ################################################################################ # A sanity check mainly for debugging purpose. See sd_cifar10.py for a non-trivial # example of stochastic depth on cifar10. ################################################################################ import os import sys import mxnet as mx import logging sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from utils import get_data import sd_module def get_conv( name, data, num_filter, kernel, stride, pad, with_relu, bn_momentum ): conv = mx.symbol.Convolution( name=name, data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True ) bn = mx.symbol.BatchNorm( name=name + '_bn', data=conv, fix_gamma=False, momentum=bn_momentum, # Same with https://github.com/soumith/cudnn.torch/blob/master/BatchNormalization.lua # cuDNN v5 don't allow a small eps of 1e-5 eps=2e-5 ) return ( # It's better to remove ReLU here # https://github.com/gcr/torch-residual-networks mx.symbol.Activation(name=name + '_relu', data=bn, act_type='relu') if with_relu else bn ) death_rates = [0.3] contexts = [mx.context.cpu()] data = mx.symbol.Variable('data') conv = get_conv( name='conv0', data=data, num_filter=16, kernel=(3, 3), stride=(1, 1), pad=(1, 1), with_relu=True, bn_momentum=0.9 ) base_mod = mx.mod.Module(conv, label_names=None, context=contexts) mod_seq = mx.mod.SequentialModule() mod_seq.add(base_mod) for i in range(len(death_rates)): conv = get_conv( name='conv0_%d' % i, data=mx.sym.Variable('data_%d' % i), num_filter=16, kernel=(3, 3), stride=(1, 1), pad=(1, 1), with_relu=True, bn_momentum=0.9 ) conv = get_conv( name='conv1_%d' % i, data=conv, num_filter=16, kernel=(3, 3), stride=(1, 1), pad=(1, 1), with_relu=False, bn_momentum=0.9 ) mod = sd_module.StochasticDepthModule(conv, data_names=['data_%d' % i], context=contexts, death_rate=death_rates[i]) mod_seq.add(mod, auto_wiring=True) act = mx.sym.Activation(mx.sym.Variable('data_final'), act_type='relu') flat = mx.sym.Flatten(act) pred = mx.sym.FullyConnected(flat, num_hidden=10) softmax = mx.sym.SoftmaxOutput(pred, name='softmax') mod_seq.add(mx.mod.Module(softmax, context=contexts, data_names=['data_final']), auto_wiring=True, take_labels=True) n_epoch = 2 batch_size = 100 basedir = os.path.dirname(__file__) get_data.get_mnist(os.path.join(basedir, "data")) train = mx.io.MNISTIter( image=os.path.join(basedir, "data", "train-images-idx3-ubyte"), label=os.path.join(basedir, "data", "train-labels-idx1-ubyte"), input_shape=(1, 28, 28), flat=False, batch_size=batch_size, shuffle=True, silent=False, seed=10) val = mx.io.MNISTIter( image=os.path.join(basedir, "data", "t10k-images-idx3-ubyte"), label=os.path.join(basedir, "data", "t10k-labels-idx1-ubyte"), 
input_shape=(1, 28, 28), flat=False, batch_size=batch_size, shuffle=True, silent=False) logging.basicConfig(level=logging.DEBUG) mod_seq.fit(train, val, optimizer_params={'learning_rate': 0.01, 'momentum': 0.9}, num_epoch=n_epoch, batch_end_callback=mx.callback.Speedometer(batch_size, 10))
apache-2.0
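The MXNet example above threads stochastic depth through SequentialModule. As a framework-agnostic illustration of the rule that module family implements, here is a small NumPy sketch (the function and rates below are illustrative, not part of the example): during training a residual branch is dropped with probability death_rate, and at inference its output is scaled by the survival probability.

import numpy as np

def stochastic_depth_block(x, block_fn, death_rate, training, rng):
    """Apply block_fn as a residual branch under stochastic depth."""
    survival = 1.0 - death_rate
    if training:
        if rng.random() < death_rate:
            return x                    # block "dies": identity path only
        return x + block_fn(x)          # block survives at full strength
    return x + survival * block_fn(x)   # test time: scale by expected survival

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 16))
out = stochastic_depth_block(x, lambda h: 0.1 * h, death_rate=0.3,
                             training=True, rng=rng)
print(out.shape)  # (4, 16)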
heli522/scikit-learn
examples/cluster/plot_ward_structured_vs_unstructured.py
317
3369
""" =========================================================== Hierarchical clustering: structured vs unstructured ward =========================================================== Example builds a swiss roll dataset and runs hierarchical clustering on their position. For more information, see :ref:`hierarchical_clustering`. In a first step, the hierarchical clustering is performed without connectivity constraints on the structure and is solely based on distance, whereas in a second step the clustering is restricted to the k-Nearest Neighbors graph: it's a hierarchical clustering with structure prior. Some of the clusters learned without connectivity constraints do not respect the structure of the swiss roll and extend across different folds of the manifolds. On the opposite, when opposing connectivity constraints, the clusters form a nice parcellation of the swiss roll. """ # Authors : Vincent Michel, 2010 # Alexandre Gramfort, 2010 # Gael Varoquaux, 2010 # License: BSD 3 clause print(__doc__) import time as time import numpy as np import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from sklearn.cluster import AgglomerativeClustering from sklearn.datasets.samples_generator import make_swiss_roll ############################################################################### # Generate data (swiss roll dataset) n_samples = 1500 noise = 0.05 X, _ = make_swiss_roll(n_samples, noise) # Make it thinner X[:, 1] *= .5 ############################################################################### # Compute clustering print("Compute unstructured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) ############################################################################### # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(np.float(l) / np.max(label + 1))) plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time) ############################################################################### # Define the structure A of the data. Here a 10 nearest neighbors from sklearn.neighbors import kneighbors_graph connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False) ############################################################################### # Compute clustering print("Compute structured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) ############################################################################### # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(float(l) / np.max(label + 1))) plt.title('With connectivity constraints (time %.2fs)' % elapsed_time) plt.show()
bsd-3-clause
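The example above contrasts unstructured and connectivity-constrained Ward clustering on a swiss roll. A minimal non-plotting sketch of the same comparison, assuming a recent scikit-learn where make_swiss_roll is importable from sklearn.datasets:

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_swiss_roll
from sklearn.neighbors import kneighbors_graph

X, _ = make_swiss_roll(n_samples=1500, noise=0.05, random_state=0)
X[:, 1] *= 0.5  # make it thinner, as in the example

unstructured = AgglomerativeClustering(n_clusters=6, linkage="ward").fit(X)

connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
structured = AgglomerativeClustering(n_clusters=6, linkage="ward",
                                     connectivity=connectivity).fit(X)

# Cluster sizes differ noticeably once the k-NN connectivity prior is imposed.
print(np.bincount(unstructured.labels_))
print(np.bincount(structured.labels_))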
lmcinnes/umap
umap/aligned_umap.py
1
19553
import numpy as np import numba from sklearn.base import BaseEstimator from sklearn.utils import check_random_state, check_array from umap.sparse import arr_intersect as intersect1d from umap.sparse import arr_union as union1d from umap.umap_ import UMAP, make_epochs_per_sample from umap.spectral import spectral_layout from umap.layouts import optimize_layout_aligned_euclidean INT32_MIN = np.iinfo(np.int32).min + 1 INT32_MAX = np.iinfo(np.int32).max - 1 @numba.njit(parallel=True) def in1d(arr, test_set): test_set = set(test_set) result = np.empty(arr.shape[0], dtype=np.bool_) for i in numba.prange(arr.shape[0]): if arr[i] in test_set: result[i] = True else: result[i] = False return result def invert_dict(d): return {value: key for key, value in d.items()} @numba.njit() def procrustes_align(embedding_base, embedding_to_align, anchors): subset1 = embedding_base[anchors[0]] subset2 = embedding_to_align[anchors[1]] M = subset2.T @ subset1 U, S, V = np.linalg.svd(M) R = U @ V return embedding_to_align @ R def expand_relations(relation_dicts, window_size=3): max_n_samples = ( max( [max(d.keys()) for d in relation_dicts] + [max(d.values()) for d in relation_dicts] ) + 1 ) result = np.full( (len(relation_dicts) + 1, 2 * window_size + 1, max_n_samples), -1, dtype=np.int32, ) reverse_relation_dicts = [invert_dict(d) for d in relation_dicts] for i in range(result.shape[0]): for j in range(window_size): result_index = (window_size) + (j + 1) if i + j + 1 >= len(relation_dicts): result[i, result_index] = np.full(max_n_samples, -1, dtype=np.int32) else: mapping = np.arange(max_n_samples) for k in range(j + 1): mapping = np.array( [relation_dicts[i + k].get(n, -1) for n in mapping] ) result[i, result_index] = mapping for j in range(0, -window_size, -1): result_index = (window_size) + (j - 1) if i + j - 1 < 0: result[i, result_index] = np.full(max_n_samples, -1, dtype=np.int32) else: mapping = np.arange(max_n_samples) for k in range(0, j - 1, -1): mapping = np.array( [reverse_relation_dicts[i + k - 1].get(n, -1) for n in mapping] ) result[i, result_index] = mapping return result @numba.njit() def build_neighborhood_similarities(graphs_indptr, graphs_indices, relations): result = np.zeros(relations.shape, dtype=np.float32) center_index = (relations.shape[1] - 1) // 2 for i in range(relations.shape[0]): base_graph_indptr = graphs_indptr[i] base_graph_indices = graphs_indices[i] for j in range(relations.shape[1]): if i + j - center_index < 0 or i + j - center_index >= len(graphs_indptr): continue comparison_graph_indptr = graphs_indptr[i + j - center_index] comparison_graph_indices = graphs_indices[i + j - center_index] for k in range(relations.shape[2]): comparison_index = relations[i, j, k] if comparison_index < 0: continue raw_base_graph_indices = base_graph_indices[ base_graph_indptr[k] : base_graph_indptr[k + 1] ].copy() base_indices = relations[i, j][raw_base_graph_indices[ raw_base_graph_indices < relations.shape[2]]] base_indices = base_indices[base_indices >= 0] comparison_indices = comparison_graph_indices[ comparison_graph_indptr[comparison_index] : comparison_graph_indptr[ comparison_index + 1 ] ] comparison_indices = comparison_indices[ in1d(comparison_indices, relations[i, j]) ] intersection_size = intersect1d(base_indices, comparison_indices).shape[ 0 ] union_size = union1d(base_indices, comparison_indices).shape[0] if union_size > 0: result[i, j, k] = intersection_size / union_size else: result[i, j, k] = 1.0 return result def get_nth_item_or_val(iterable_or_val, n): if iterable_or_val is 
None: return None if type(iterable_or_val) in (list, tuple, np.ndarray): return iterable_or_val[n] elif type(iterable_or_val) in (int, float, bool, None): return iterable_or_val else: raise ValueError("Unrecognized parameter type") PARAM_NAMES = ( "n_neighbors", "n_components", "metric", "metric_kwds", "n_epochs", "learning_rate", "init", "min_dist", "spread", "set_op_mix_ratio", "local_connectivity", "repulsion_strength", "negative_sample_rate", "transform_queue_size", "angular_rp_forest", "target_n_neighbors", "target_metric", "target_metric_kwds", "target_weight", "unique", ) def set_aligned_params(new_params, existing_params, n_models, param_names=PARAM_NAMES): for param in param_names: if param in new_params: if isinstance(existing_params[param], list): existing_params[param].append(new_params[param]) elif isinstance(existing_params[param], tuple): existing_params[param] = existing_params[param] + \ (new_params[param],) elif isinstance(existing_params[param], np.ndarray): existing_params[param] = np.append(existing_params[param], new_params[param]) else: if new_params[param] != existing_params[param]: existing_params[param] = (existing_params[param],) * n_models + ( new_params[param], ) return existing_params @numba.njit() def init_from_existing_internal( previous_embedding, weights_indptr, weights_indices, weights_data, relation_dict ): n_samples = weights_indptr.shape[0] - 1 n_features = previous_embedding.shape[1] result = np.zeros((n_samples, n_features), dtype=np.float32) for i in range(n_samples): if i in relation_dict: result[i] = previous_embedding[relation_dict[i]] else: normalisation = 0.0 for idx in range(weights_indptr[i], weights_indptr[i + 1]): j = weights_indices[idx] if j in relation_dict: normalisation += weights_data[idx] result[i] += ( weights_data[idx] * previous_embedding[relation_dict[j]] ) if normalisation == 0: result[i] = np.random.uniform(-10.0, 10.0, n_features) else: result[i] /= normalisation return result def init_from_existing(previous_embedding, graph, relations): typed_relations = numba.typed.Dict.empty(numba.types.int32, numba.types.int32) for key, val in relations.items(): typed_relations[np.int32(key)] = np.int32(val) return init_from_existing_internal( previous_embedding, graph.indptr, graph.indices, graph.data, typed_relations, ) class AlignedUMAP(BaseEstimator): def __init__( self, n_neighbors=15, n_components=2, metric="euclidean", metric_kwds=None, n_epochs=None, learning_rate=1.0, init="spectral", alignment_regularisation=1.0e-2, alignment_window_size=3, min_dist=0.1, spread=1.0, low_memory=False, set_op_mix_ratio=1.0, local_connectivity=1.0, repulsion_strength=1.0, negative_sample_rate=5, transform_queue_size=4.0, a=None, b=None, random_state=None, angular_rp_forest=False, target_n_neighbors=-1, target_metric="categorical", target_metric_kwds=None, target_weight=0.5, transform_seed=42, force_approximation_algorithm=False, verbose=False, unique=False, ): self.n_neighbors = n_neighbors self.metric = metric self.metric_kwds = metric_kwds self.n_epochs = n_epochs self.init = init self.n_components = n_components self.repulsion_strength = repulsion_strength self.learning_rate = learning_rate self.alignment_regularisation = alignment_regularisation self.alignment_window_size = alignment_window_size self.spread = spread self.min_dist = min_dist self.low_memory = low_memory self.set_op_mix_ratio = set_op_mix_ratio self.local_connectivity = local_connectivity self.negative_sample_rate = negative_sample_rate self.random_state = random_state 
self.angular_rp_forest = angular_rp_forest self.transform_queue_size = transform_queue_size self.target_n_neighbors = target_n_neighbors self.target_metric = target_metric self.target_metric_kwds = target_metric_kwds self.target_weight = target_weight self.transform_seed = transform_seed self.force_approximation_algorithm = force_approximation_algorithm self.verbose = verbose self.unique = unique self.a = a self.b = b def fit(self, X, y=None, **fit_params): if "relations" not in fit_params: raise ValueError( "Aligned UMAP requires relations between data to be " "specified" ) self.dict_relations_ = fit_params["relations"] assert type(self.dict_relations_) in (list, tuple) assert type(X) in (list, tuple, np.ndarray) assert (len(X) - 1) == (len(self.dict_relations_)) if y is not None: assert type(y) in (list, tuple, np.ndarray) assert (len(y) - 1) == (len(self.dict_relations_)) else: y = [None] * len(X) # We need n_components to be constant or this won't work if type(self.n_components) in (list, tuple, np.ndarray): raise ValueError("n_components must be a single integer, and cannot vary") self.n_models_ = len(X) self.mappers_ = [ UMAP( n_neighbors=get_nth_item_or_val(self.n_neighbors, n), min_dist=get_nth_item_or_val(self.min_dist, n), n_epochs=get_nth_item_or_val(self.n_epochs, n), repulsion_strength=get_nth_item_or_val(self.repulsion_strength, n), learning_rate=get_nth_item_or_val(self.learning_rate, n), spread=get_nth_item_or_val(self.spread, n), negative_sample_rate=get_nth_item_or_val(self.negative_sample_rate, n), local_connectivity=get_nth_item_or_val(self.local_connectivity, n), set_op_mix_ratio=get_nth_item_or_val(self.set_op_mix_ratio, n), unique=get_nth_item_or_val(self.unique, n), n_components=self.n_components, metric=self.metric, metric_kwds=self.metric_kwds, low_memory=self.low_memory, random_state=self.random_state, angular_rp_forest=self.angular_rp_forest, transform_queue_size=self.transform_queue_size, target_n_neighbors=self.target_n_neighbors, target_metric=self.target_metric, target_metric_kwds=self.target_metric_kwds, target_weight=self.target_weight, transform_seed=self.transform_seed, force_approximation_algorithm=self.force_approximation_algorithm, verbose=self.verbose, a=self.a, b=self.b, ).fit(X[n], y[n]) for n in range(self.n_models_) ] if self.n_epochs is None: n_epochs = 200 else: n_epochs = self.n_epochs window_size = fit_params.get("window_size", self.alignment_window_size) relations = expand_relations(self.dict_relations_, window_size) indptr_list = numba.typed.List.empty_list(numba.types.int32[::1]) indices_list = numba.typed.List.empty_list(numba.types.int32[::1]) heads = numba.typed.List.empty_list(numba.types.int32[::1]) tails = numba.typed.List.empty_list(numba.types.int32[::1]) epochs_per_samples = numba.typed.List.empty_list(numba.types.float64[::1]) for mapper in self.mappers_: indptr_list.append(mapper.graph_.indptr) indices_list.append(mapper.graph_.indices) heads.append(mapper.graph_.tocoo().row) tails.append(mapper.graph_.tocoo().col) epochs_per_samples.append( make_epochs_per_sample(mapper.graph_.tocoo().data, n_epochs) ) rng_state_transform = np.random.RandomState(self.transform_seed) regularisation_weights = build_neighborhood_similarities( indptr_list, indices_list, relations, ) first_init = spectral_layout( self.mappers_[0]._raw_data, self.mappers_[0].graph_, self.n_components, rng_state_transform, ) expansion = 10.0 / np.abs(first_init).max() first_embedding = (first_init * expansion).astype( np.float32, order="C", ) embeddings = 
numba.typed.List.empty_list(numba.types.float32[:, ::1]) embeddings.append(first_embedding) for i in range(1, self.n_models_): next_init = spectral_layout( self.mappers_[i]._raw_data, self.mappers_[i].graph_, self.n_components, rng_state_transform, ) expansion = 10.0 / np.abs(next_init).max() next_embedding = (next_init * expansion).astype( np.float32, order="C", ) anchor_data = relations[i][window_size - 1] left_anchors = anchor_data[anchor_data >= 0] right_anchors = np.where(anchor_data >= 0)[0] embeddings.append( procrustes_align( embeddings[-1], next_embedding, np.vstack([left_anchors, right_anchors]), ) ) seed_triplet = rng_state_transform.randint(INT32_MIN, INT32_MAX, 3).astype( np.int64 ) self.embeddings_ = optimize_layout_aligned_euclidean( embeddings, embeddings, heads, tails, n_epochs, epochs_per_samples, regularisation_weights, relations, seed_triplet, lambda_=self.alignment_regularisation, move_other=True, ) for i, embedding in enumerate(self.embeddings_): disconnected_vertices = ( np.array(self.mappers_[i].graph_.sum(axis=1)).flatten() == 0 ) embedding[disconnected_vertices] = np.full(self.n_components, np.nan) return self def fit_transform(self, X, y=None, **fit_params): self.fit(X, y, **fit_params) return self.embeddings_ def update(self, X, y=None, **fit_params): if "relations" not in fit_params: raise ValueError( "Aligned UMAP requires relations between data to be " "specified" ) new_dict_relations = fit_params["relations"] X = check_array(X) self.__dict__ = set_aligned_params(fit_params, self.__dict__, self.n_models_) new_mapper = UMAP( n_neighbors=get_nth_item_or_val(self.n_neighbors, self.n_models_), min_dist=get_nth_item_or_val(self.min_dist, self.n_models_), n_epochs=get_nth_item_or_val(self.n_epochs, self.n_models_), repulsion_strength=get_nth_item_or_val( self.repulsion_strength, self.n_models_ ), learning_rate=get_nth_item_or_val(self.learning_rate, self.n_models_), spread=get_nth_item_or_val(self.spread, self.n_models_), negative_sample_rate=get_nth_item_or_val( self.negative_sample_rate, self.n_models_ ), local_connectivity=get_nth_item_or_val( self.local_connectivity, self.n_models_ ), set_op_mix_ratio=get_nth_item_or_val(self.set_op_mix_ratio, self.n_models_), unique=get_nth_item_or_val(self.unique, self.n_models_), n_components=self.n_components, random_state=self.random_state, transform_seed=self.transform_seed, ).fit(X, y) self.n_models_ += 1 self.mappers_ += [new_mapper] # TODO: We can likely make this more efficient and not recompute each time self.dict_relations_ += [invert_dict(new_dict_relations)] if self.n_epochs is None: n_epochs = 200 else: n_epochs = self.n_epochs indptr_list = numba.typed.List.empty_list(numba.types.int32[::1]) indices_list = numba.typed.List.empty_list(numba.types.int32[::1]) heads = numba.typed.List.empty_list(numba.types.int32[::1]) tails = numba.typed.List.empty_list(numba.types.int32[::1]) epochs_per_samples = numba.typed.List.empty_list(numba.types.float64[::1]) for i, mapper in enumerate(self.mappers_): indptr_list.append(mapper.graph_.indptr) indices_list.append(mapper.graph_.indices) heads.append(mapper.graph_.tocoo().row) tails.append(mapper.graph_.tocoo().col) if i == len(self.mappers_) - 1: epochs_per_samples.append( make_epochs_per_sample(mapper.graph_.tocoo().data, n_epochs) ) else: epochs_per_samples.append( np.full(mapper.embedding_.shape[0], n_epochs + 1, dtype=np.float64) ) new_relations = expand_relations(self.dict_relations_) new_regularisation_weights = build_neighborhood_similarities( indptr_list, 
indices_list, new_relations, ) new_embedding = init_from_existing( self.embeddings_[-1], new_mapper.graph_, new_dict_relations ) self.embeddings_.append(new_embedding) rng_state_transform = np.random.RandomState(self.transform_seed) seed_triplet = rng_state_transform.randint(INT32_MIN, INT32_MAX, 3).astype( np.int64 ) self.embeddings_ = optimize_layout_aligned_euclidean( self.embeddings_, self.embeddings_, heads, tails, n_epochs, epochs_per_samples, new_regularisation_weights, new_relations, seed_triplet, lambda_=self.alignment_regularisation, )
bsd-3-clause
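A hedged usage sketch for the AlignedUMAP class defined above: two related data slices plus a relations dictionary mapping row indices of the first slice onto rows of the second (the synthetic slices here are purely illustrative).

import numpy as np
from umap import AlignedUMAP

rng = np.random.default_rng(42)
slice_a = rng.normal(size=(200, 8))
slice_b = slice_a + rng.normal(scale=0.05, size=(200, 8))  # a drifted copy

# Identity relation: row i of slice_a corresponds to row i of slice_b.
relations = [{i: i for i in range(200)}]

mapper = AlignedUMAP(n_neighbors=10, alignment_window_size=2,
                     alignment_regularisation=1e-2)
embeddings = mapper.fit_transform([slice_a, slice_b], relations=relations)
print(embeddings[0].shape, embeddings[1].shape)  # (200, 2) for each slice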
jhaux/tensorflow
tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
122
2703
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Text datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tarfile import numpy as np from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.python.platform import gfile DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz' def maybe_download_dbpedia(data_dir): """Download if DBpedia data is not present.""" train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv') test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv') if not (gfile.Exists(train_path) and gfile.Exists(test_path)): archive_path = base.maybe_download( 'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL) tfile = tarfile.open(archive_path, 'r:*') tfile.extractall(data_dir) def load_dbpedia(size='small', test_with_fake_data=False): """Get DBpedia datasets from CSV files.""" if not test_with_fake_data: data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data') maybe_download_dbpedia(data_dir) train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv') test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv') if size == 'small': # Reduce the size of original data by a factor of 1000. base.shrink_csv(train_path, 1000) base.shrink_csv(test_path, 1000) train_path = train_path.replace('train.csv', 'train_small.csv') test_path = test_path.replace('test.csv', 'test_small.csv') else: module_path = os.path.dirname(__file__) train_path = os.path.join(module_path, 'data', 'text_train.csv') test_path = os.path.join(module_path, 'data', 'text_test.csv') train = base.load_csv_without_header( train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) test = base.load_csv_without_header( test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0) return base.Datasets(train=train, validation=None, test=test)
apache-2.0
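A hedged call sketch for the loader above; the tensorflow.contrib.learn namespace only exists in TensorFlow 1.x, and the first call downloads the DBpedia archive and shrinks the CSVs as described in load_dbpedia.

from tensorflow.contrib.learn.python.learn.datasets import text_datasets

# size='small' reduces the original CSVs by a factor of 1000 before loading.
dbpedia = text_datasets.load_dbpedia(size='small')
print(len(dbpedia.train.data), len(dbpedia.test.data))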
negrinho/deep_architect
deep_architect/contrib/misc/evaluators/tensorflow/tpu_estimator_classification.py
1
15062
from __future__ import print_function import gc import logging import tensorflow as tf import numpy as np from deep_architect.contrib.misc.datasets.cifar10_tf import Cifar10DataSet import deep_architect.contrib.misc.evaluators.tensorflow.gcloud_utils as gcu import deep_architect.core as co import deep_architect.utils as ut import deep_architect.helpers.tensorflow_eager_support as htfe logger = logging.getLogger(__name__) IMAGE_HEIGHT = 32 IMAGE_WIDTH = 32 IMAGE_DEPTH = 3 def set_recompile(outputs, recompile): def fn(mx): mx._is_compiled = not recompile co.traverse_backward(outputs, fn) logger.debug('set_recompile') def input_fn(mode, data_dir, batch_size=128, train=True): data = Cifar10DataSet(data_dir, subset=mode, use_distortion=train).make_batch(batch_size) return data def record_summaries(metric_dict, step): for key, value in metric_dict.items(): tf.contrib.summary.scalar(name=key, tensor=value, step=step) def construct_host_fn(metric_dict, model_dir, prefix='', max_queue_size=10): metric_names = list(metric_dict.keys()) def host_fn(gs, *args): step = gs[0] with tf.contrib.summary.create_file_writer( logdir=model_dir, max_queue=max_queue_size).as_default(): with tf.contrib.summary.always_record_summaries(): for i, metric in enumerate(metric_names): tf.contrib.summary.scalar(prefix + metric, args[i][0], step=step) return tf.contrib.summary.all_summary_ops() gs_t = tf.reshape(tf.train.get_or_create_global_step(), [1]) other_tensors = [tf.reshape(metric_dict[key], [1]) for key in metric_names] return host_fn, [gs_t] + other_tensors def get_optimizer(optimizer_type, learning_rate): if optimizer_type == 'adam': return tf.train.AdamOptimizer(learning_rate=learning_rate) elif optimizer_type == 'sgd_mom': return tf.train.MomentumOptimizer(learning_rate, momentum=.9) elif optimizer_type == 'rmsprop': return tf.train.RMSPropOptimizer(learning_rate, momentum=.9, epsilon=1.0) else: raise ValueError('Optimizer type not recognized: %s' % optimizer_type) class TPUEstimatorEvaluator: def __init__(self, data_dir, tpu_name, max_num_training_epochs=200, stop_patience=20, optimizer_type='adam', batch_size=256, lr_decay_method='constant', init_lr=.001, lr_decay_value=.97, lr_num_epochs_per_decay=2.4, lr_warmup_epochs=3.0, weight_decay=0.0, display_step=1, log_output_to_terminal=True, base_dir='./scratch', delete_scratch_after_use=False, epochs_between_evals=100, use_tpu=True): self.tpu_name = tpu_name self.num_examples = Cifar10DataSet.num_examples_per_epoch() self.batch_size = batch_size self.data_dir = data_dir self.steps_per_epoch = self.num_examples // self.batch_size self.steps_per_val_epoch = Cifar10DataSet.num_examples_per_epoch( subset='validation') / self.batch_size self.steps_per_test_epoch = Cifar10DataSet.num_examples_per_epoch( subset='eval') / self.batch_size self.max_num_training_epochs = max_num_training_epochs self.epochs_between_evals = epochs_between_evals self.display_step = display_step self.stop_patience = stop_patience self.lr_decay_method = lr_decay_method self.init_lr = init_lr * 8 if use_tpu else init_lr self.lr_decay_value = lr_decay_value self.lr_num_epochs_per_decay = lr_num_epochs_per_decay self.lr_warmup_epochs = lr_warmup_epochs self.weight_decay = weight_decay self.optimizer_type = optimizer_type self.log_output_to_terminal = log_output_to_terminal self.base_dir = base_dir self.use_tpu = use_tpu self.delete_scratch_after_use = delete_scratch_after_use self.num_parameters = -1 def get_learning_rate(self, step): total_steps = int(self.max_num_training_epochs * 
self.num_examples / self.batch_size) if self.lr_decay_method == 'constant': lr = self.init_lr elif self.lr_decay_method == 'cosine': lr = tf.train.cosine_decay(self.init_lr, step, total_steps) elif self.lr_decay_method == 'stepwise': # divide LR by 10 at 1/2, 2/3, and 5/6 of total epochs boundaries = [ int(0.5 * total_steps), int(0.667 * total_steps), int(0.833 * total_steps) ] values = [ 1.0 * self.init_lr, 0.1 * self.init_lr, 0.01 * self.init_lr, 0.0001 * self.init_lr ] lr = tf.train.piecewise_constant(step, boundaries, values) else: lr = tf.train.exponential_decay(self.init_lr, step, self.steps_per_epoch * self.lr_num_epochs_per_decay, self.lr_decay_value, staircase=True) warmup_steps = int(self.lr_warmup_epochs * self.steps_per_epoch) warmup_lr = (self.init_lr * tf.cast(step, tf.float32) / tf.cast(warmup_steps, tf.float32)) lr = tf.cond(step < warmup_steps, lambda: warmup_lr, lambda: lr) lr = tf.maximum(lr, 0.0001 * self.init_lr) return lr def eval(self, inputs, outputs, save_fn=None, state=None): tf.reset_default_graph() self.num_parameters = -1 logger.debug('In Evaluator') if state is not None and 'model_dir' in state: model_dir = state['model_dir'] else: model_dir = gcu.get_empty_bucket_folder(self.base_dir) if save_fn: save_fn({'model_dir': model_dir}) logger.info('Using folder %s for evaluation', model_dir) def metric_fn(labels, predictions): return {'accuracy': tf.metrics.accuracy(labels, predictions)} def model_fn(features, labels, mode, params): set_recompile(outputs, True) gc.collect() htfe.set_is_training(outputs, mode == tf.estimator.ModeKeys.TRAIN) step = tf.train.get_or_create_global_step() if 'in' in inputs: co.forward({inputs['in']: features}) logits = outputs['out'].val else: co.forward({ inputs['in0']: features, inputs['in1']: float(self.steps_per_epoch * self.max_num_training_epochs) }) logits = outputs['out1'].val aux_logits = outputs['out0'].val predicted_classes = tf.argmax(logits, 1, output_type=tf.int32) if mode == tf.estimator.ModeKeys.PREDICT: predictions = { 'class_ids': predicted_classes[:, tf.newaxis], 'probabilities': tf.nn.softmax(logits), 'logits': logits, } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # define loss and optimizer train_vars = tf.trainable_variables() with tf.variable_scope('l2'): l2_loss = tf.add_n([ tf.nn.l2_loss(v) for v in train_vars if 'kernel' in v.name ]) * self.weight_decay onehot_labels = tf.one_hot(labels, 10) unreg_loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=logits, reduction=tf.losses.Reduction.MEAN) aux_loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=aux_logits, weights=.5, reduction=tf.losses.Reduction.MEAN) if 'out1' in outputs else 0 loss = unreg_loss + l2_loss + aux_loss if mode == tf.estimator.ModeKeys.EVAL: return tf.contrib.tpu.TPUEstimatorSpec( mode, loss=loss, eval_metrics=(metric_fn, [labels, predicted_classes])) # Create training op. 
assert mode == tf.estimator.ModeKeys.TRAIN if self.num_parameters == -1: self.num_parameters = np.sum([ np.prod(v.get_shape().as_list()) for v in tf.trainable_variables() ]) accuracy = metric_fn(labels, predicted_classes)['accuracy'] tf.identity(accuracy[1], name='train_accuracy') learning_rate = self.get_learning_rate(step) metric_dict = { 'batch_loss': loss, 'learning_rate': learning_rate, 'batch_accuracy': tf.reduce_mean( tf.cast(tf.equal(predicted_classes, labels), tf.float32)) } host_fn = None optimizer = get_optimizer(self.optimizer_type, learning_rate) if self.use_tpu: host_fn = construct_host_fn(metric_dict, model_dir, prefix='training/', max_queue_size=self.steps_per_epoch) optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) else: record_summaries(metric_dict, step) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): gvs = optimizer.compute_gradients(loss) gvs = [(tf.where(tf.is_nan(grad), tf.zeros_like(grad), grad), val) for grad, val in gvs] train_op = optimizer.apply_gradients( gvs, global_step=tf.train.get_or_create_global_step()) return tf.contrib.tpu.TPUEstimatorSpec(mode, loss=loss, train_op=train_op, host_call=host_fn) my_project_name = gcu.get_gcloud_project() my_zone = gcu.get_gcloud_zone() cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( self.tpu_name.split(','), zone=my_zone, project=my_project_name) run_config = tf.contrib.tpu.RunConfig( cluster=cluster_resolver, model_dir=model_dir, save_checkpoints_steps=self.max_num_training_epochs * self.steps_per_epoch, keep_checkpoint_max=1, log_step_count_steps=self.steps_per_epoch, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=self.steps_per_epoch, num_shards=8), ) results = {} try: estimator = tf.contrib.tpu.TPUEstimator( model_fn=model_fn, config=run_config, use_tpu=self.use_tpu, train_batch_size=self.batch_size, eval_batch_size=self.batch_size, predict_batch_size=self.batch_size, params={}) timer_manager = ut.TimerManager() timer_manager.create_timer('eval') train_fn = lambda params: input_fn('train', self.data_dir, batch_size=params['batch_size'], train=True) val_fn = lambda params: input_fn('validation', self.data_dir, batch_size=params['batch_size'], train=False) try: estimator.train( input_fn=train_fn, max_steps=2 #self.steps_per_epoch * # self.max_num_training_epochs ) except (tf.train.NanLossDuringTrainingError, tf.errors.InvalidArgumentError): logger.warning( 'Architecture in %s received nan loss in training', model_dir) logger.debug("Optimization Finished!") val_fn = lambda params: input_fn('validation', self.data_dir, batch_size=params['batch_size'], train=False) timer_manager.tick_timer('eval') eval_results = estimator.evaluate( input_fn=val_fn, steps=2) #self.steps_per_val_epoch) t_infer = ( timer_manager.get_time_since_last_tick('eval', 'miliseconds') / Cifar10DataSet.num_examples_per_epoch('validation')) val_acc = float(eval_results['accuracy']) logger.debug("Validation accuracy: %f", val_acc) results = { 'validation_accuracy': val_acc, 'num_parameters': self.num_parameters, 'inference_time_per_example_in_miliseconds': t_infer, 'epoch': int(eval_results['global_step']) / self.steps_per_epoch } test_fn = lambda params: input_fn('eval', self.data_dir, batch_size=params['batch_size'], train=False) test_results = estimator.evaluate( input_fn=test_fn, steps=2, #self.steps_per_test_epoch, name='test') test_acc = float(test_results['accuracy']) logger.debug("Test accuracy: %f", test_acc) results['test_accuracy'] = test_acc results[ 
'training_time_in_hours'] = timer_manager.get_time_since_event( 'eval', 'start', units='hours') except: logger.info('Error during evaluation') finally: if self.delete_scratch_after_use: gcu.delete_bucket_folder(model_dir) return results def save_state(self, folderpath): pass def load_state(self, folderpath): pass
mit
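The evaluator above combines a linear warmup with one of several decay schedules (constant, cosine, stepwise, exponential) and clamps the result to a small fraction of the initial rate. A minimal framework-free sketch of the warmup-plus-stepwise branch, with hypothetical argument names and the same 1/2, 2/3, 5/6 boundaries, might look like this:

def stepwise_lr_with_warmup(step, init_lr, total_steps, warmup_steps, floor=1e-4):
    # Linear warmup from 0 to init_lr over the first warmup_steps updates.
    if warmup_steps and step < warmup_steps:
        lr = init_lr * float(step) / float(warmup_steps)
    else:
        # Divide the rate by 10 at 1/2 and 2/3 of training, then drop to 1e-4 * init_lr at 5/6.
        boundaries = (0.5, 0.667, 0.833)
        factors = (1.0, 0.1, 0.01, 0.0001)
        stage = sum(step >= int(b * total_steps) for b in boundaries)
        lr = init_lr * factors[stage]
    # Never let the rate fall below a small fraction of the initial value.
    return max(lr, floor * init_lr)

# e.g. a 100-epoch run at 391 steps/epoch with a 5-epoch warmup:
print([round(stepwise_lr_with_warmup(s, 0.1, 39100, 1955), 5)
       for s in (0, 1000, 20000, 30000, 39000)])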
ChadFulton/statsmodels
statsmodels/examples/example_enhanced_boxplots.py
1
3178
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm # Necessary to make horizontal axis labels fit plt.rcParams['figure.subplot.bottom'] = 0.23 data = sm.datasets.anes96.load_pandas() party_ID = np.arange(7) labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat", "Independent-Independent", "Independent-Republican", "Weak Republican", "Strong Republican"] # Group age by party ID. age = [data.exog['age'][data.endog == id] for id in party_ID] # Create a violin plot. fig = plt.figure() ax = fig.add_subplot(111) sm.graphics.violinplot(age, ax=ax, labels=labels, plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small', 'label_rotation':30}) ax.set_xlabel("Party identification of respondent.") ax.set_ylabel("Age") ax.set_title("US national election '96 - Age & Party Identification") # Create a bean plot. fig2 = plt.figure() ax = fig2.add_subplot(111) sm.graphics.beanplot(age, ax=ax, labels=labels, plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small', 'label_rotation':30}) ax.set_xlabel("Party identification of respondent.") ax.set_ylabel("Age") ax.set_title("US national election '96 - Age & Party Identification") # Create a jitter plot. fig3 = plt.figure() ax = fig3.add_subplot(111) plot_opts={'cutoff_val':5, 'cutoff_type':'abs', 'label_fontsize':'small', 'label_rotation':30, 'violin_fc':(0.8, 0.8, 0.8), 'jitter_marker':'.', 'jitter_marker_size':3, 'bean_color':'#FF6F00', 'bean_mean_color':'#009D91'} sm.graphics.beanplot(age, ax=ax, labels=labels, jitter=True, plot_opts=plot_opts) ax.set_xlabel("Party identification of respondent.") ax.set_ylabel("Age") ax.set_title("US national election '96 - Age & Party Identification") # Create an asymmetrical jitter plot. ix = data.exog['income'] < 16 # incomes < $30k age = data.exog['age'][ix] endog = data.endog[ix] age_lower_income = [age[endog == id] for id in party_ID] ix = data.exog['income'] >= 20 # incomes > $50k age = data.exog['age'][ix] endog = data.endog[ix] age_higher_income = [age[endog == id] for id in party_ID] fig = plt.figure() ax = fig.add_subplot(111) plot_opts['violin_fc'] = (0.5, 0.5, 0.5) plot_opts['bean_show_mean'] = False plot_opts['bean_show_median'] = False plot_opts['bean_legend_text'] = 'Income < \$30k' plot_opts['cutoff_val'] = 10 sm.graphics.beanplot(age_lower_income, ax=ax, labels=labels, side='left', jitter=True, plot_opts=plot_opts) plot_opts['violin_fc'] = (0.7, 0.7, 0.7) plot_opts['bean_color'] = '#009D91' plot_opts['bean_legend_text'] = 'Income > \$50k' sm.graphics.beanplot(age_higher_income, ax=ax, labels=labels, side='right', jitter=True, plot_opts=plot_opts) ax.set_xlabel("Party identification of respondent.") ax.set_ylabel("Age") ax.set_title("US national election '96 - Age & Party Identification") # Show all plots. plt.show()
bsd-3-clause
BILS/agda
agda/species_geo_coder/forms.py
1
1365
from django import forms class SpeciesGeoCoderForm(forms.Form): name = forms.CharField(max_length=100, initial='SpeciesGeoCoder job', help_text='Job name') localities = forms.FileField( help_text='Select a file containing locality data', label='Localities', required=True) polygons = forms.FileField( help_text='Select a file containing polygon data', label='Polygons', required=True) occurences = forms.IntegerField( help_text='Specify the minimum number of occurrences (localities) needed for considering a species to be present in a polygon.', initial=1, required=False, min_value=1, label='Occurrence cutoff') verbose = forms.BooleanField( help_text='Checking this box will make SpeciesGeoCoder also report how many times a species is found in each polygon.', required=False, label='Verbose') plot = forms.BooleanField( help_text='In addition to the occurrence result in NEXUS format, this function will make SpeciesGeoCoder also produce graphical output that illustrates coexistence of species, distribution etc. This function is only available for datasets containing XX or less taxon names.', required=False, label='Make plots')
mit
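For context, a form like this is normally consumed in a Django view: bind it to request.POST and request.FILES, call is_valid(), and read cleaned_data. The sketch below is hypothetical; the view name, import path, and template are assumptions, not part of the repo:

from django.http import HttpResponse
from django.shortcuts import render

from .forms import SpeciesGeoCoderForm  # assumes this module lives next to forms.py

def submit_job(request):
    if request.method == 'POST':
        form = SpeciesGeoCoderForm(request.POST, request.FILES)
        if form.is_valid():
            name = form.cleaned_data['name']
            localities = form.cleaned_data['localities']   # an UploadedFile object
            polygons = form.cleaned_data['polygons']
            # ... hand the uploaded files to the SpeciesGeoCoder job runner here ...
            return HttpResponse('Submitted job: %s' % name)
    else:
        form = SpeciesGeoCoderForm()
    return render(request, 'species_geo_coder/submit.html', {'form': form})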
arabenjamin/scikit-learn
examples/classification/plot_digits_classification.py
287
2397
""" ================================ Recognizing hand-written digits ================================ An example showing how the scikit-learn can be used to recognize images of hand-written digits. This example is commented in the :ref:`tutorial section of the user manual <introduction>`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics # The digits dataset digits = datasets.load_digits() # The data that we are interested in is made of 8x8 images of digits, let's # have a look at the first 3 images, stored in the `images` attribute of the # dataset. If we were working from image files, we could load them using # pylab.imread. Note that each image must have the same size. For these # images, we know which digit they represent: it is given in the 'target' of # the dataset. images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier classifier = svm.SVC(gamma=0.001) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2]) # Now predict the value of the digit on the second half: expected = digits.target[n_samples / 2:] predicted = classifier.predict(data[n_samples / 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show()
bsd-3-clause
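A compact sketch of the same fit/predict flow, written with floor division so the half-split indices stay integers under Python 3 (variable names are my own; the scikit-learn calls are the same ones used above):

from sklearn import datasets, svm, metrics

digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

half = n_samples // 2          # integer division keeps the slice index an int
clf = svm.SVC(gamma=0.001)
clf.fit(data[:half], digits.target[:half])

predicted = clf.predict(data[half:])
print(metrics.classification_report(digits.target[half:], predicted))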
tinghuiz/SfMLearner
kitti_eval/pose_evaluation_utils.py
1
13592
# Some of the code are from the TUM evaluation toolkit: # https://vision.in.tum.de/data/datasets/rgbd-dataset/tools#absolute_trajectory_error_ate import math import numpy as np def compute_ate(gtruth_file, pred_file): gtruth_list = read_file_list(gtruth_file) pred_list = read_file_list(pred_file) matches = associate(gtruth_list, pred_list, 0, 0.01) if len(matches) < 2: return False gtruth_xyz = np.array([[float(value) for value in gtruth_list[a][0:3]] for a,b in matches]) pred_xyz = np.array([[float(value) for value in pred_list[b][0:3]] for a,b in matches]) # Make sure that the first matched frames align (no need for rotational alignment as # all the predicted/ground-truth snippets have been converted to use the same coordinate # system with the first frame of the snippet being the origin). offset = gtruth_xyz[0] - pred_xyz[0] pred_xyz += offset[None,:] # Optimize the scaling factor scale = np.sum(gtruth_xyz * pred_xyz)/np.sum(pred_xyz ** 2) alignment_error = pred_xyz * scale - gtruth_xyz rmse = np.sqrt(np.sum(alignment_error ** 2))/len(matches) return rmse def read_file_list(filename): """ Reads a trajectory from a text file. File format: The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched) and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp. Input: filename -- File name Output: dict -- dictionary of (stamp,data) tuples """ file = open(filename) data = file.read() lines = data.replace(","," ").replace("\t"," ").split("\n") list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"] list = [(float(l[0]),l[1:]) for l in list if len(l)>1] return dict(list) def associate(first_list, second_list,offset,max_difference): """ Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim to find the closest match for every input tuple. Input: first_list -- first dictionary of (stamp,data) tuples second_list -- second dictionary of (stamp,data) tuples offset -- time offset between both dictionaries (e.g., to model the delay between the sensors) max_difference -- search radius for candidate generation Output: matches -- list of matched tuples ((stamp1,data1),(stamp2,data2)) """ first_keys = list(first_list.keys()) second_keys = list(second_list.keys()) potential_matches = [(abs(a - (b + offset)), a, b) for a in first_keys for b in second_keys if abs(a - (b + offset)) < max_difference] potential_matches.sort() matches = [] for diff, a, b in potential_matches: if a in first_keys and b in second_keys: first_keys.remove(a) second_keys.remove(b) matches.append((a, b)) matches.sort() return matches def rot2quat(R): rz, ry, rx = mat2euler(R) qw, qx, qy, qz = euler2quat(rz, ry, rx) return qw, qx, qy, qz def quat2mat(q): ''' Calculate rotation matrix corresponding to quaternion https://afni.nimh.nih.gov/pub/dist/src/pkundu/meica.libs/nibabel/quaternions.py Parameters ---------- q : 4 element array-like Returns ------- M : (3,3) array Rotation matrix corresponding to input quaternion *q* Notes ----- Rotation matrix applies to column vectors, and is applied to the left of coordinate vectors. The algorithm here allows non-unit quaternions. 
References ---------- Algorithm from http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion Examples -------- >>> import numpy as np >>> M = quat2mat([1, 0, 0, 0]) # Identity quaternion >>> np.allclose(M, np.eye(3)) True >>> M = quat2mat([0, 1, 0, 0]) # 180 degree rotn around axis 0 >>> np.allclose(M, np.diag([1, -1, -1])) True ''' w, x, y, z = q Nq = w*w + x*x + y*y + z*z if Nq < 1e-8: return np.eye(3) s = 2.0/Nq X = x*s Y = y*s Z = z*s wX = w*X; wY = w*Y; wZ = w*Z xX = x*X; xY = x*Y; xZ = x*Z yY = y*Y; yZ = y*Z; zZ = z*Z return np.array( [[ 1.0-(yY+zZ), xY-wZ, xZ+wY ], [ xY+wZ, 1.0-(xX+zZ), yZ-wX ], [ xZ-wY, yZ+wX, 1.0-(xX+yY) ]]) def mat2euler(M, cy_thresh=None, seq='zyx'): ''' Taken From: http://afni.nimh.nih.gov/pub/dist/src/pkundu/meica.libs/nibabel/eulerangles.py Discover Euler angle vector from 3x3 matrix Uses the conventions above. Parameters ---------- M : array-like, shape (3,3) cy_thresh : None or scalar, optional threshold below which to give up on straightforward arctan for estimating x rotation. If None (default), estimate from precision of input. Returns ------- z : scalar y : scalar x : scalar Rotations in radians around z, y, x axes, respectively Notes ----- If there was no numerical error, the routine could be derived using Sympy expression for z then y then x rotation matrix, which is:: [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] with the obvious derivations for z, y, and x z = atan2(-r12, r11) y = asin(r13) x = atan2(-r23, r33) for x,y,z order y = asin(-r31) x = atan2(r32, r33) z = atan2(r21, r11) Problems arise when cos(y) is close to zero, because both of:: z = atan2(cos(y)*sin(z), cos(y)*cos(z)) x = atan2(cos(y)*sin(x), cos(x)*cos(y)) will be close to atan2(0, 0), and highly unstable. The ``cy`` fix for numerical instability below is from: *Graphics Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN: 0123361559. Specifically it comes from EulerAngles.c by Ken Shoemake, and deals with the case where cos(y) is close to zero: See: http://www.graphicsgems.org/ The code appears to be licensed (from the website) as "can be used without restrictions". 
''' M = np.asarray(M) if cy_thresh is None: try: cy_thresh = np.finfo(M.dtype).eps * 4 except ValueError: cy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2) cy = math.sqrt(r33*r33 + r23*r23) if seq=='zyx': if cy > cy_thresh: # cos(y) not close to zero, standard form z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) y = math.atan2(r13, cy) # atan2(sin(y), cy) x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y)) else: # cos(y) (close to) zero, so x -> 0.0 (see above) # so r21 -> sin(z), r22 -> cos(z) and z = math.atan2(r21, r22) y = math.atan2(r13, cy) # atan2(sin(y), cy) x = 0.0 elif seq=='xyz': if cy > cy_thresh: y = math.atan2(-r31, cy) x = math.atan2(r32, r33) z = math.atan2(r21, r11) else: z = 0.0 if r31 < 0: y = np.pi/2 x = atan2(r12, r13) else: y = -np.pi/2 else: raise Exception('Sequence not recognized') return z, y, x import functools def euler2mat(z=0, y=0, x=0, isRadian=True): ''' Return matrix for rotations around z, y and x axes Uses the z, then y, then x convention above Parameters ---------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Returns ------- M : array shape (3,3) Rotation matrix giving same rotation as for given angles Examples -------- >>> zrot = 1.3 # radians >>> yrot = -0.1 >>> xrot = 0.2 >>> M = euler2mat(zrot, yrot, xrot) >>> M.shape == (3, 3) True The output rotation matrix is equal to the composition of the individual rotations >>> M1 = euler2mat(zrot) >>> M2 = euler2mat(0, yrot) >>> M3 = euler2mat(0, 0, xrot) >>> composed_M = np.dot(M3, np.dot(M2, M1)) >>> np.allclose(M, composed_M) True You can specify rotations by named arguments >>> np.all(M3 == euler2mat(x=xrot)) True When applying M to a vector, the vector should column vector to the right of M. If the right hand side is a 2D array rather than a vector, then each column of the 2D array represents a vector. >>> vec = np.array([1, 0, 0]).reshape((3,1)) >>> v2 = np.dot(M, vec) >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array >>> vecs2 = np.dot(M, vecs) Rotations are counter-clockwise. >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3)) >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]]) True >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3)) >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]]) True >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3)) >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]]) True Notes ----- The direction of rotation is given by the right-hand rule (orient the thumb of the right hand along the axis around which the rotation occurs, with the end of the thumb at the positive end of the axis; curl your fingers; the direction your fingers curl is the direction of rotation). Therefore, the rotations are counterclockwise if looking along the axis of rotation from positive to negative. ''' if not isRadian: z = ((np.pi)/180.) * z y = ((np.pi)/180.) * y x = ((np.pi)/180.) 
* x assert z>=(-np.pi) and z < np.pi, 'Inapprorpriate z: %f' % z assert y>=(-np.pi) and y < np.pi, 'Inapprorpriate y: %f' % y assert x>=(-np.pi) and x < np.pi, 'Inapprorpriate x: %f' % x Ms = [] if z: cosz = math.cos(z) sinz = math.sin(z) Ms.append(np.array( [[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]])) if y: cosy = math.cos(y) siny = math.sin(y) Ms.append(np.array( [[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]])) if x: cosx = math.cos(x) sinx = math.sin(x) Ms.append(np.array( [[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]])) if Ms: return functools.reduce(np.dot, Ms[::-1]) return np.eye(3) def euler2quat(z=0, y=0, x=0, isRadian=True): ''' Return quaternion corresponding to these Euler angles Uses the z, then y, then x convention above Parameters ---------- z : scalar Rotation angle in radians around z-axis (performed first) y : scalar Rotation angle in radians around y-axis x : scalar Rotation angle in radians around x-axis (performed last) Returns ------- quat : array shape (4,) Quaternion in w, x, y z (real, then vector) format Notes ----- We can derive this formula in Sympy using: 1. Formula giving quaternion corresponding to rotation of theta radians about arbitrary axis: http://mathworld.wolfram.com/EulerParameters.html 2. Generated formulae from 1.) for quaternions corresponding to theta radians rotations about ``x, y, z`` axes 3. Apply quaternion multiplication formula - http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to formulae from 2.) to give formula for combined rotations. ''' if not isRadian: z = ((np.pi)/180.) * z y = ((np.pi)/180.) * y x = ((np.pi)/180.) * x z = z/2.0 y = y/2.0 x = x/2.0 cz = math.cos(z) sz = math.sin(z) cy = math.cos(y) sy = math.sin(y) cx = math.cos(x) sx = math.sin(x) return np.array([ cx*cy*cz - sx*sy*sz, cx*sy*sz + cy*cz*sx, cx*cz*sy - sx*cy*sz, cx*cy*sz + sx*cz*sy]) def pose_vec_to_mat(vec): tx = vec[0] ty = vec[1] tz = vec[2] trans = np.array([tx, ty, tz]).reshape((3,1)) rot = euler2mat(vec[5], vec[4], vec[3]) Tmat = np.concatenate((rot, trans), axis=1) hfiller = np.array([0, 0, 0, 1]).reshape((1,4)) Tmat = np.concatenate((Tmat, hfiller), axis=0) return Tmat def dump_pose_seq_TUM(out_file, poses, times): # First frame as the origin first_pose = pose_vec_to_mat(poses[0]) with open(out_file, 'w') as f: for p in range(len(times)): this_pose = pose_vec_to_mat(poses[p]) this_pose = np.dot(first_pose, np.linalg.inv(this_pose)) tx = this_pose[0, 3] ty = this_pose[1, 3] tz = this_pose[2, 3] rot = this_pose[:3, :3] qw, qx, qy, qz = rot2quat(rot) f.write('%f %f %f %f %f %f %f %f\n' % (times[p], tx, ty, tz, qx, qy, qz, qw))
mit
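A small usage sketch of the conversion helpers defined above, assuming the file is importable as pose_evaluation_utils: it round-trips a rotation through the Euler, matrix, and quaternion representations and builds a 4x4 pose from a 6-DoF vector.

import numpy as np
from pose_evaluation_utils import (euler2mat, mat2euler, euler2quat,
                                   quat2mat, pose_vec_to_mat)

z, y, x = 0.3, -0.2, 0.1                       # rotation angles in radians
R = euler2mat(z, y, x)
assert np.allclose(mat2euler(R), (z, y, x))    # Euler <-> matrix round trip

q = euler2quat(z, y, x)                        # quaternion in (w, x, y, z) order
assert np.allclose(quat2mat(q), R)             # quaternion and matrix agree

# 6-DoF pose vector [tx, ty, tz, rx, ry, rz] -> 4x4 homogeneous transform
T = pose_vec_to_mat(np.array([1.0, 2.0, 3.0, x, y, z]))
print(T)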
anntzer/scipy
scipy/cluster/vq.py
10
29222
""" K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) ==================================================================== Provides routines for k-means clustering, generating code books from k-means models and quantizing vectors by comparing them with centroids in a code book. .. autosummary:: :toctree: generated/ whiten -- Normalize a group of observations so each feature has unit variance vq -- Calculate code book membership of a set of observation vectors kmeans -- Perform k-means on a set of observation vectors forming k clusters kmeans2 -- A different implementation of k-means with more methods -- for initializing centroids Background information ---------------------- The k-means algorithm takes as input the number of clusters to generate, k, and a set of observation vectors to cluster. It returns a set of centroids, one for each of the k clusters. An observation vector is classified with the cluster number or centroid index of the centroid closest to it. A vector v belongs to cluster i if it is closer to centroid i than any other centroid. If v belongs to i, we say centroid i is the dominating centroid of v. The k-means algorithm tries to minimize distortion, which is defined as the sum of the squared distances between each observation vector and its dominating centroid. The minimization is achieved by iteratively reclassifying the observations into clusters and recalculating the centroids until a configuration is reached in which the centroids are stable. One can also define a maximum number of iterations. Since vector quantization is a natural application for k-means, information theory terminology is often used. The centroid index or cluster index is also referred to as a "code" and the table mapping codes to centroids and, vice versa, is often referred to as a "code book". The result of k-means, a set of centroids, can be used to quantize vectors. Quantization aims to find an encoding of vectors that reduces the expected distortion. All routines expect obs to be an M by N array, where the rows are the observation vectors. The codebook is a k by N array, where the ith row is the centroid of code word i. The observation vectors and centroids have the same feature dimension. As an example, suppose we wish to compress a 24-bit color image (each pixel is represented by one byte for red, one for blue, and one for green) before sending it over the web. By using a smaller 8-bit encoding, we can reduce the amount of data by two thirds. Ideally, the colors for each of the 256 possible 8-bit encoding values should be chosen to minimize distortion of the color. Running k-means with k=256 generates a code book of 256 codes, which fills up all possible 8-bit sequences. Instead of sending a 3-byte value for each pixel, the 8-bit centroid index (or code word) of the dominating centroid is transmitted. The code book is also sent over the wire so each 8-bit code can be translated back to a 24-bit pixel value representation. If the image of interest was of an ocean, we would expect many 24-bit blues to be represented by 8-bit codes. If it was an image of a human face, more flesh-tone colors would be represented in the code book. """ import warnings import numpy as np from collections import deque from scipy._lib._util import _asarray_validated, check_random_state,\ rng_integers from scipy.spatial.distance import cdist from . 
import _vq __docformat__ = 'restructuredtext' __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] class ClusterError(Exception): pass def whiten(obs, check_finite=True): """ Normalize a group of observations on a per feature basis. Before running k-means, it is beneficial to rescale each feature dimension of the observation set by its standard deviation (i.e. "whiten" it - as in "white noise" where each frequency has equal power). Each feature is divided by its standard deviation across all observations to give it unit variance. Parameters ---------- obs : ndarray Each row of the array is an observation. The columns are the features seen during each observation. >>> # f0 f1 f2 >>> obs = [[ 1., 1., 1.], #o0 ... [ 2., 2., 2.], #o1 ... [ 3., 3., 3.], #o2 ... [ 4., 4., 4.]] #o3 check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- result : ndarray Contains the values in `obs` scaled by the standard deviation of each column. Examples -------- >>> import numpy as np >>> from scipy.cluster.vq import whiten >>> features = np.array([[1.9, 2.3, 1.7], ... [1.5, 2.5, 2.2], ... [0.8, 0.6, 1.7,]]) >>> whiten(features) array([[ 4.17944278, 2.69811351, 7.21248917], [ 3.29956009, 2.93273208, 9.33380951], [ 1.75976538, 0.7038557 , 7.21248917]]) """ obs = _asarray_validated(obs, check_finite=check_finite) std_dev = obs.std(axis=0) zero_std_mask = std_dev == 0 if zero_std_mask.any(): std_dev[zero_std_mask] = 1.0 warnings.warn("Some columns have standard deviation zero. " "The values of these columns will not change.", RuntimeWarning) return obs / std_dev def vq(obs, code_book, check_finite=True): """ Assign codes from a code book to observations. Assigns a code from a code book to each observation. Each observation vector in the 'M' by 'N' `obs` array is compared with the centroids in the code book and assigned the code of the closest centroid. The features in `obs` should have unit variance, which can be achieved by passing them through the whiten function. The code book can be created with the k-means algorithm or a different encoding algorithm. Parameters ---------- obs : ndarray Each row of the 'M' x 'N' array is an observation. The columns are the "features" seen during each observation. The features must be whitened first using the whiten function or something equivalent. code_book : ndarray The code book is usually generated using the k-means algorithm. Each row of the array holds a different code, and the columns are the features of the code. >>> # f0 f1 f2 f3 >>> code_book = [ ... [ 1., 2., 3., 4.], #c0 ... [ 1., 2., 3., 4.], #c1 ... [ 1., 2., 3., 4.]] #c2 check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- code : ndarray A length M array holding the code book index for each observation. dist : ndarray The distortion (distance) between the observation and its nearest code. Examples -------- >>> import numpy as np >>> from scipy.cluster.vq import vq >>> code_book = np.array([[1.,1.,1.], ... [2.,2.,2.]]) >>> features = np.array([[ 1.9,2.3,1.7], ... [ 1.5,2.5,2.2], ... 
[ 0.8,0.6,1.7]]) >>> vq(features,code_book) (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) """ obs = _asarray_validated(obs, check_finite=check_finite) code_book = _asarray_validated(code_book, check_finite=check_finite) ct = np.common_type(obs, code_book) c_obs = obs.astype(ct, copy=False) c_code_book = code_book.astype(ct, copy=False) if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32): return _vq.vq(c_obs, c_code_book) return py_vq(obs, code_book, check_finite=False) def py_vq(obs, code_book, check_finite=True): """ Python version of vq algorithm. The algorithm computes the Euclidean distance between each observation and every frame in the code_book. Parameters ---------- obs : ndarray Expects a rank 2 array. Each row is one observation. code_book : ndarray Code book to use. Same format than obs. Should have same number of features (e.g., columns) than obs. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- code : ndarray code[i] gives the label of the ith obversation; its code is code_book[code[i]]. mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. Notes ----- This function is slower than the C version but works for all input types. If the inputs have the wrong types for the C versions of the function, this one is called as a last resort. It is about 20 times slower than the C version. """ obs = _asarray_validated(obs, check_finite=check_finite) code_book = _asarray_validated(code_book, check_finite=check_finite) if obs.ndim != code_book.ndim: raise ValueError("Observation and code_book should have the same rank") if obs.ndim == 1: obs = obs[:, np.newaxis] code_book = code_book[:, np.newaxis] dist = cdist(obs, code_book) code = dist.argmin(axis=1) min_dist = dist[np.arange(len(code)), code] return code, min_dist def _kmeans(obs, guess, thresh=1e-5): """ "raw" version of k-means. Returns ------- code_book The lowest distortion codebook found. avg_dist The average distance a observation is from a code in the book. Lower means the code_book matches the data better. See Also -------- kmeans : wrapper around k-means Examples -------- Note: not whitened in this example. >>> import numpy as np >>> from scipy.cluster.vq import _kmeans >>> features = np.array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... [ 1.0,1.0]]) >>> book = np.array((features[0],features[2])) >>> _kmeans(features,book) (array([[ 1.7 , 2.4 ], [ 0.73333333, 1.13333333]]), 0.40563916697728591) """ code_book = np.asarray(guess) diff = np.inf prev_avg_dists = deque([diff], maxlen=2) while diff > thresh: # compute membership and distances between obs and code_book obs_code, distort = vq(obs, code_book, check_finite=False) prev_avg_dists.append(distort.mean(axis=-1)) # recalc code_book as centroids of associated obs code_book, has_members = _vq.update_cluster_means(obs, obs_code, code_book.shape[0]) code_book = code_book[has_members] diff = prev_avg_dists[0] - prev_avg_dists[1] return code_book, prev_avg_dists[1] def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True, *, seed=None): """ Performs k-means on a set of observation vectors forming k clusters. 
The k-means algorithm adjusts the classification of the observations into clusters and updates the cluster centroids until the position of the centroids is stable over successive iterations. In this implementation of the algorithm, the stability of the centroids is determined by comparing the absolute value of the change in the average Euclidean distance between the observations and their corresponding centroids against a threshold. This yields a code book mapping centroids to codes and vice versa. Parameters ---------- obs : ndarray Each row of the M by N array is an observation vector. The columns are the features seen during each observation. The features must be whitened first with the `whiten` function. k_or_guess : int or ndarray The number of centroids to generate. A code is assigned to each centroid, which is also the row index of the centroid in the code_book matrix generated. The initial k centroids are chosen by randomly selecting observations from the observation matrix. Alternatively, passing a k by N array specifies the initial k centroids. iter : int, optional The number of times to run k-means, returning the codebook with the lowest distortion. This argument is ignored if initial centroids are specified with an array for the ``k_or_guess`` parameter. This parameter does not represent the number of iterations of the k-means algorithm. thresh : float, optional Terminates the k-means algorithm if the change in distortion since the last k-means iteration is less than or equal to threshold. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Seed for initializing the pseudo-random number generator. If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. The default is None. Returns ------- codebook : ndarray A k by N array of k centroids. The ith centroid codebook[i] is represented with the code i. The centroids and codes generated represent the lowest distortion seen, not necessarily the globally minimal distortion. Note that the number of centroids is not necessarily the same as the ``k_or_guess`` parameter, because centroids assigned to no observations are removed during iterations. distortion : float The mean (non-squared) Euclidean distance between the observations passed and the centroids generated. Note the difference to the standard definition of distortion in the context of the k-means algorithm, which is the sum of the squared distances. See Also -------- kmeans2 : a different implementation of k-means clustering with more methods for generating initial centroids but without using a distortion change threshold as a stopping criterion. whiten : must be called prior to passing an observation matrix to kmeans. Notes ----- For more functionalities or optimal performance, you can use `sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_. `This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_ is a benchmark result of several implementations. 
Examples -------- >>> import numpy as np >>> from scipy.cluster.vq import vq, kmeans, whiten >>> import matplotlib.pyplot as plt >>> features = np.array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... [ 0.1,0.1], ... [ 0.2,1.8], ... [ 2.0,0.5], ... [ 0.3,1.5], ... [ 1.0,1.0]]) >>> whitened = whiten(features) >>> book = np.array((whitened[0],whitened[2])) >>> kmeans(whitened,book) (array([[ 2.3110306 , 2.86287398], # random [ 0.93218041, 1.24398691]]), 0.85684700941625547) >>> codes = 3 >>> kmeans(whitened,codes) (array([[ 2.3110306 , 2.86287398], # random [ 1.32544402, 0.65607529], [ 0.40782893, 2.02786907]]), 0.5196582527686241) >>> # Create 50 datapoints in two clusters a and b >>> pts = 50 >>> rng = np.random.default_rng() >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts) >>> b = rng.multivariate_normal([30, 10], ... [[10, 2], [2, 1]], ... size=pts) >>> features = np.concatenate((a, b)) >>> # Whiten data >>> whitened = whiten(features) >>> # Find 2 clusters in the data >>> codebook, distortion = kmeans(whitened, 2) >>> # Plot whitened data and cluster centers in red >>> plt.scatter(whitened[:, 0], whitened[:, 1]) >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r') >>> plt.show() """ obs = _asarray_validated(obs, check_finite=check_finite) if iter < 1: raise ValueError("iter must be at least 1, got %s" % iter) # Determine whether a count (scalar) or an initial guess (array) was passed. if not np.isscalar(k_or_guess): guess = _asarray_validated(k_or_guess, check_finite=check_finite) if guess.size < 1: raise ValueError("Asked for 0 clusters. Initial book was %s" % guess) return _kmeans(obs, guess, thresh=thresh) # k_or_guess is a scalar, now verify that it's an integer k = int(k_or_guess) if k != k_or_guess: raise ValueError("If k_or_guess is a scalar, it must be an integer.") if k < 1: raise ValueError("Asked for %d clusters." % k) rng = check_random_state(seed) # initialize best distance value to a large value best_dist = np.inf for i in range(iter): # the initial code book is randomly selected from observations guess = _kpoints(obs, k, rng) book, dist = _kmeans(obs, guess, thresh=thresh) if dist < best_dist: best_book = book best_dist = dist return best_book, best_dist def _kpoints(data, k, rng): """Pick k points at random in data (one row = one observation). Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : `numpy.random.Generator` or `numpy.random.RandomState` Random number generator. Returns ------- x : ndarray A 'k' by 'N' containing the initial centroids """ idx = rng.choice(data.shape[0], size=k, replace=False) return data[idx] def _krandinit(data, k, rng): """Returns k samples of a random variable whose parameters depend on data. More precisely, it returns k observations sampled from a Gaussian random variable whose mean and covariances are the ones estimated from the data. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : `numpy.random.Generator` or `numpy.random.RandomState` Random number generator. 
Returns ------- x : ndarray A 'k' by 'N' containing the initial centroids """ mu = data.mean(axis=0) if data.ndim == 1: cov = np.cov(data) x = rng.standard_normal(size=k) x *= np.sqrt(cov) elif data.shape[1] > data.shape[0]: # initialize when the covariance matrix is rank deficient _, s, vh = np.linalg.svd(data - mu, full_matrices=False) x = rng.standard_normal(size=(k, s.size)) sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1) x = x.dot(sVh) else: cov = np.atleast_2d(np.cov(data, rowvar=False)) # k rows, d cols (one row = one obs) # Generate k sample of a random variable ~ Gaussian(mu, cov) x = rng.standard_normal(size=(k, mu.size)) x = x.dot(np.linalg.cholesky(cov).T) x += mu return x def _kpp(data, k, rng): """ Picks k points in the data based on the kmeans++ method. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : `numpy.random.Generator` or `numpy.random.RandomState` Random number generator. Returns ------- init : ndarray A 'k' by 'N' containing the initial centroids. References ---------- .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, 2007. """ dims = data.shape[1] if len(data.shape) > 1 else 1 init = np.ndarray((k, dims)) for i in range(k): if i == 0: init[i, :] = data[rng_integers(rng, data.shape[0])] else: D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) probs = D2/D2.sum() cumprobs = probs.cumsum() r = rng.uniform() init[i, :] = data[np.searchsorted(cumprobs, r)] return init _valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp} def _missing_warn(): """Print a warning when called.""" warnings.warn("One of the clusters is empty. " "Re-run kmeans with a different initialization.") def _missing_raise(): """Raise a ClusterError when called.""" raise ClusterError("One of the clusters is empty. " "Re-run kmeans with a different initialization.") _valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} def kmeans2(data, k, iter=10, thresh=1e-5, minit='random', missing='warn', check_finite=True, *, seed=None): """ Classify a set of observations into k clusters using the k-means algorithm. The algorithm attempts to minimize the Euclidean distance between observations and centroids. Several initialization methods are included. Parameters ---------- data : ndarray A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length 'M' array of 'M' 1-D observations. k : int or ndarray The number of clusters to form as well as the number of centroids to generate. If `minit` initialization string is 'matrix', or if a ndarray is given instead, it is interpreted as initial cluster to use instead. iter : int, optional Number of iterations of the k-means algorithm to run. Note that this differs in meaning from the iters parameter to the kmeans function. thresh : float, optional (not used yet) minit : str, optional Method for initialization. Available methods are 'random', 'points', '++' and 'matrix': 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. 'points': choose k observations (rows) at random from data for the initial centroids. '++': choose k observations accordingly to the kmeans++ method (careful seeding) 'matrix': interpret the k parameter as a k by M (or length k array for 1-D data) array of initial centroids. 
missing : str, optional Method to deal with empty clusters. Available methods are 'warn' and 'raise': 'warn': give a warning and continue. 'raise': raise an ClusterError and terminate the algorithm. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Seed for initializing the pseudo-random number generator. If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. The default is None. Returns ------- centroid : ndarray A 'k' by 'N' array of centroids found at the last iteration of k-means. label : ndarray label[i] is the code or index of the centroid the ith observation is closest to. See Also -------- kmeans References ---------- .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, 2007. Examples -------- >>> from scipy.cluster.vq import kmeans2 >>> import matplotlib.pyplot as plt >>> import numpy as np Create z, an array with shape (100, 2) containing a mixture of samples from three multivariate normal distributions. >>> rng = np.random.default_rng() >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45) >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30) >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25) >>> z = np.concatenate((a, b, c)) >>> rng.shuffle(z) Compute three clusters. >>> centroid, label = kmeans2(z, 3, minit='points') >>> centroid array([[ 2.22274463, -0.61666946], # may vary [ 0.54069047, 5.86541444], [ 6.73846769, 4.01991898]]) How many points are in each cluster? >>> counts = np.bincount(label) >>> counts array([29, 51, 20]) # may vary Plot the clusters. >>> w0 = z[label == 0] >>> w1 = z[label == 1] >>> w2 = z[label == 2] >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0') >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1') >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2') >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids') >>> plt.axis('equal') >>> plt.legend(shadow=True) >>> plt.show() """ if int(iter) < 1: raise ValueError("Invalid iter (%s), " "must be a positive integer." 
% iter) try: miss_meth = _valid_miss_meth[missing] except KeyError as e: raise ValueError("Unknown missing method %r" % (missing,)) from e data = _asarray_validated(data, check_finite=check_finite) if data.ndim == 1: d = 1 elif data.ndim == 2: d = data.shape[1] else: raise ValueError("Input of rank > 2 is not supported.") if data.size < 1: raise ValueError("Empty input is not supported.") # If k is not a single value, it should be compatible with data's shape if minit == 'matrix' or not np.isscalar(k): code_book = np.array(k, copy=True) if data.ndim != code_book.ndim: raise ValueError("k array doesn't match data rank") nc = len(code_book) if data.ndim > 1 and code_book.shape[1] != d: raise ValueError("k array doesn't match data dimension") else: nc = int(k) if nc < 1: raise ValueError("Cannot ask kmeans2 for %d clusters" " (k was %s)" % (nc, k)) elif nc != k: warnings.warn("k was not an integer, was converted.") try: init_meth = _valid_init_meth[minit] except KeyError as e: raise ValueError("Unknown init method %r" % (minit,)) from e else: rng = check_random_state(seed) code_book = init_meth(data, k, rng) for i in range(iter): # Compute the nearest neighbor for each obs using the current code book label = vq(data, code_book)[0] # Update the code book by computing centroids new_code_book, has_members = _vq.update_cluster_means(data, label, nc) if not has_members.all(): miss_meth() # Set the empty clusters to their previous positions new_code_book[~has_members] = code_book[~has_members] code_book = new_code_book return code_book, label
bsd-3-clause
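As a quick usage sketch of the public API above (synthetic two-cluster data; the seed values are arbitrary): whiten the observations so each feature has unit variance, then cluster with kmeans2 using the kmeans++ initializer.

import numpy as np
from scipy.cluster.vq import whiten, kmeans2

rng = np.random.default_rng(0)
pts = np.vstack([rng.normal(0, 1, size=(100, 2)),      # cluster around the origin
                 rng.normal(5, 1, size=(100, 2))])     # cluster around (5, 5)

white = whiten(pts)                                    # unit variance per feature
centroids, labels = kmeans2(white, 2, minit='++', seed=1)
print(centroids.shape, np.bincount(labels))            # (2, 2) and roughly [100 100]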
heli522/scikit-learn
sklearn/linear_model/tests/test_perceptron.py
375
1815
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_raises from sklearn.utils import check_random_state from sklearn.datasets import load_iris from sklearn.linear_model import Perceptron iris = load_iris() random_state = check_random_state(12) indices = np.arange(iris.data.shape[0]) random_state.shuffle(indices) X = iris.data[indices] y = iris.target[indices] X_csr = sp.csr_matrix(X) X_csr.sort_indices() class MyPerceptron(object): def __init__(self, n_iter=1): self.n_iter = n_iter def fit(self, X, y): n_samples, n_features = X.shape self.w = np.zeros(n_features, dtype=np.float64) self.b = 0.0 for t in range(self.n_iter): for i in range(n_samples): if self.predict(X[i])[0] != y[i]: self.w += y[i] * X[i] self.b += y[i] def project(self, X): return np.dot(X, self.w) + self.b def predict(self, X): X = np.atleast_2d(X) return np.sign(self.project(X)) def test_perceptron_accuracy(): for data in (X, X_csr): clf = Perceptron(n_iter=30, shuffle=False) clf.fit(data, y) score = clf.score(data, y) assert_true(score >= 0.7) def test_perceptron_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 clf1 = MyPerceptron(n_iter=2) clf1.fit(X, y_bin) clf2 = Perceptron(n_iter=2, shuffle=False) clf2.fit(X, y_bin) assert_array_almost_equal(clf1.w, clf2.coef_.ravel()) def test_undefined_methods(): clf = Perceptron() for meth in ("predict_proba", "predict_log_proba"): assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
bsd-3-clause
rdevon/cortex
cortex/built_ins/networks/resnets.py
1
9487
'''Residual encoder / decoder ''' import logging import torch.nn as nn from .base_network import BaseNet from .modules import View from .utils import (apply_nonlinearity, finish_layer_2d, get_nonlinearity) from .SpectralNormLayer import SNConv2d, SNLinear logger = logging.getLogger('cortex.models' + __name__) class ConvMeanPool(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix='', spectral_norm=False): super(ConvMeanPool, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = 'cmp' + prefix models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=False)) models.add_module(name + '_pool', nn.AvgPool2d(2, count_include_pad=False)) if nonlinearity: models.add_module('{}_{}'.format( name, nonlinearity.__class__.__name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x class MeanPoolConv(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix='', spectral_norm=False): super(MeanPoolConv, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = 'mpc' + prefix models.add_module(name + '_pool', nn.AvgPool2d( 2, count_include_pad=False)) models.add_module( name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=False)) if nonlinearity: models.add_module( '{}_{}'.format(name, nonlinearity.__class__.__name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x class UpsampleConv(nn.Module): def __init__(self, dim_in, dim_out, f_size, nonlinearity=None, prefix='', spectral_norm=False): super(UpsampleConv, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) name = prefix + '_usc' models.add_module(name + '_up', nn.Upsample(scale_factor=2)) models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=False)) if nonlinearity: models.add_module( '{}_{}'.format(name, nonlinearity.__class__.__name__), nonlinearity) self.models = models def forward(self, x): x = self.models(x) return x class ResBlock(nn.Module): def __init__(self, dim_in, dim_out, dim_x, dim_y, f_size, resample=None, name='resblock', nonlinearity='ReLU', spectral_norm=False, **layer_args): super(ResBlock, self).__init__() Conv2d = SNConv2d if spectral_norm else nn.Conv2d models = nn.Sequential() skip_models = nn.Sequential() nonlinearity = get_nonlinearity(nonlinearity) if resample not in ('up', 'down'): raise Exception('invalid resample value: {}'.format(resample)) # Skip model if resample == 'down': conv = MeanPoolConv(dim_in, dim_out, f_size, prefix=name, spectral_norm=spectral_norm) else: conv = UpsampleConv(dim_in, dim_out, f_size, prefix=name, spectral_norm=spectral_norm) skip_models.add_module(name + '_skip', conv) finish_layer_2d(models, name, dim_x, dim_y, dim_in, nonlinearity=nonlinearity, **layer_args) # Up or down sample if resample == 'down': conv = Conv2d(dim_in, dim_in, f_size, 1, 1) models.add_module(name + '_stage1', conv) finish_layer_2d(models, name + '_stage1', dim_x // 2, dim_y // 2, dim_in, nonlinearity=nonlinearity, **layer_args) else: conv = UpsampleConv(dim_in, dim_out, f_size, prefix=name + '_stage1', spectral_norm=spectral_norm) models.add_module(name + '_stage1', conv) finish_layer_2d(models, name + '_stage1', dim_x * 2, dim_y * 2, dim_out, nonlinearity=nonlinearity, **layer_args) if resample == 'down': conv = 
ConvMeanPool(dim_in, dim_out, f_size, prefix=name, spectral_norm=spectral_norm) elif resample == 'up': conv = Conv2d(dim_out, dim_out, f_size, 1, 1) else: raise Exception('invalid resample value') models.add_module(name + '_stage2', conv) self.models = models self.skip_models = skip_models def forward(self, x): x_ = x x = self.models(x_) x_ = self.skip_models(x_) return x + x_ class ResDecoder(nn.Module): def __init__(self, shape, dim_in=None, f_size=3, dim_h=64, n_steps=3, nonlinearity='ReLU', output_nonlinearity=None, **layer_args): super(ResDecoder, self).__init__() models = nn.Sequential() dim_h_ = dim_h logger.debug('Output shape: {}'.format(shape)) dim_x_, dim_y_, dim_out_ = shape dim_x = dim_x_ dim_y = dim_y_ dim_h = dim_h_ nonlinearity = get_nonlinearity(nonlinearity) self.output_nonlinearity = output_nonlinearity for n in range(n_steps): dim_x //= 2 dim_y //= 2 if n < n_steps - 1: dim_h *= 2 dim_out = dim_x * dim_y * dim_h name = 'initial_({}/{})_0'.format(dim_in, dim_out) models.add_module(name, nn.Linear(dim_in, dim_out)) models.add_module(name + '_reshape', View(-1, dim_h, dim_x, dim_y)) finish_layer_2d(models, name, dim_x, dim_y, dim_h, nonlinearity=nonlinearity, **layer_args) dim_out = dim_h for i in range(n_steps): dim_in = dim_out dim_out = dim_in // 2 name = 'resblock_({}/{})_{}'.format(dim_in, dim_out, i + 1) resblock = ResBlock(dim_in, dim_out, dim_x, dim_y, f_size, resample='up', name=name, **layer_args) models.add_module(name, resblock) dim_x *= 2 dim_y *= 2 name = 'conv_({}/{})_{}'.format(dim_in, dim_out, 'final') finish_layer_2d(models, name, dim_x, dim_y, dim_out, nonlinearity=nonlinearity, **layer_args) models.add_module(name, nn.ConvTranspose2d( dim_out, dim_out_, f_size, 1, 1, bias=False)) self.models = models def forward(self, x, nonlinearity=None, **nonlinearity_args): if nonlinearity is None: nonlinearity = self.output_nonlinearity elif not nonlinearity: nonlinearity = None x = self.models(x) return apply_nonlinearity(x, nonlinearity, **nonlinearity_args) class ResEncoder(BaseNet): def __init__(self, shape, dim_out=None, dim_h=64, fully_connected_layers=None, f_size=3, n_steps=3, nonlinearity='ReLU', output_nonlinearity=None, spectral_norm=False, **layer_args): super(ResEncoder, self).__init__( nonlinearity=nonlinearity, output_nonlinearity=output_nonlinearity) Conv2d = SNConv2d if spectral_norm else nn.Conv2d Linear = SNLinear if spectral_norm else nn.Linear dim_out_ = dim_out fully_connected_layers = fully_connected_layers or [] if isinstance(fully_connected_layers, int): fully_connected_layers = [fully_connected_layers] logger.debug('Input shape: {}'.format(shape)) dim_x, dim_y, dim_in = shape dim_out = dim_h name = 'conv_({}/{})_0'.format(dim_in, dim_out) self.models.add_module(name, Conv2d(dim_in, dim_out, f_size, 1, 1, bias=False)) dim_out = dim_h for i in range(n_steps): dim_in = dim_out dim_out = dim_in * 2 name = 'resblock_({}/{})_{}'.format(dim_in, dim_out, i + 1) resblock = ResBlock(dim_in, dim_out, dim_x, dim_y, f_size, resample='down', name=name, spectral_norm=spectral_norm, **layer_args) self.models.add_module(name, resblock) dim_x //= 2 dim_y //= 2 final_depth = dim_out dim_out = dim_x * dim_y * dim_out self.models.add_module('final_reshape', View(-1, dim_out)) self.models.add_module('final_reshape_{}x{}x{}to{}' .format(dim_x, dim_y, final_depth, dim_out), View(-1, dim_out)) dim_out = self.add_linear_layers(dim_out, fully_connected_layers, Linear=Linear, **layer_args) self.add_output_layer(dim_out, dim_out_, Linear=Linear)
bsd-3-clause
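The down-sampling ResBlock above pairs a pooled-and-convolved skip path with a conv / pool main path and adds the two. A stripped-down PyTorch sketch of that same pattern (plain nn modules only; not the cortex classes or their spectral-norm variants) is:

import torch
import torch.nn as nn

class TinyDownResBlock(nn.Module):
    """Skip path: 2x2 mean pool then 3x3 conv; main path: BN/ReLU convs followed by a mean pool."""
    def __init__(self, c_in, c_out):
        super().__init__()
        self.skip = nn.Sequential(nn.AvgPool2d(2),
                                  nn.Conv2d(c_in, c_out, 3, 1, 1, bias=False))
        self.main = nn.Sequential(
            nn.BatchNorm2d(c_in), nn.ReLU(),
            nn.Conv2d(c_in, c_in, 3, 1, 1),
            nn.BatchNorm2d(c_in), nn.ReLU(),
            nn.Conv2d(c_in, c_out, 3, 1, 1),
            nn.AvgPool2d(2),
        )

    def forward(self, x):
        # Residual sum of the two halved-resolution branches.
        return self.main(x) + self.skip(x)

x = torch.randn(4, 64, 32, 32)
print(TinyDownResBlock(64, 128)(x).shape)   # torch.Size([4, 128, 16, 16])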
heli522/scikit-learn
examples/cluster/plot_color_quantization.py
295
3443
# -*- coding: utf-8 -*- """ ================================== Color Quantization using K-Means ================================== Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace (China), reducing the number of colors required to show the image from 96,615 unique colors to 64, while preserving the overall appearance quality. In this example, pixels are represented in a 3D-space and K-means is used to find 64 color clusters. In the image processing literature, the codebook obtained from K-means (the cluster centers) is called the color palette. Using a single byte, up to 256 colors can be addressed, whereas an RGB encoding requires 3 bytes per pixel. The GIF file format, for example, uses such a palette. For comparison, a quantized image using a random codebook (colors picked up randomly) is also shown. """ # Authors: Robert Layton <robertlayton@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # # License: BSD 3 clause print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.metrics import pairwise_distances_argmin from sklearn.datasets import load_sample_image from sklearn.utils import shuffle from time import time n_colors = 64 # Load the Summer Palace photo china = load_sample_image("china.jpg") # Convert to floats instead of the default 8 bits integer coding. Dividing by # 255 is important so that plt.imshow behaves works well on float data (need to # be in the range [0-1] china = np.array(china, dtype=np.float64) / 255 # Load Image and transform to a 2D numpy array. w, h, d = original_shape = tuple(china.shape) assert d == 3 image_array = np.reshape(china, (w * h, d)) print("Fitting model on a small sub-sample of the data") t0 = time() image_array_sample = shuffle(image_array, random_state=0)[:1000] kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample) print("done in %0.3fs." % (time() - t0)) # Get labels for all points print("Predicting color indices on the full image (k-means)") t0 = time() labels = kmeans.predict(image_array) print("done in %0.3fs." % (time() - t0)) codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1] print("Predicting color indices on the full image (random)") t0 = time() labels_random = pairwise_distances_argmin(codebook_random, image_array, axis=0) print("done in %0.3fs." % (time() - t0)) def recreate_image(codebook, labels, w, h): """Recreate the (compressed) image from the code book & labels""" d = codebook.shape[1] image = np.zeros((w, h, d)) label_idx = 0 for i in range(w): for j in range(h): image[i][j] = codebook[labels[label_idx]] label_idx += 1 return image # Display all results, alongside original image plt.figure(1) plt.clf() ax = plt.axes([0, 0, 1, 1]) plt.axis('off') plt.title('Original image (96,615 colors)') plt.imshow(china) plt.figure(2) plt.clf() ax = plt.axes([0, 0, 1, 1]) plt.axis('off') plt.title('Quantized image (64 colors, K-Means)') plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h)) plt.figure(3) plt.clf() ax = plt.axes([0, 0, 1, 1]) plt.axis('off') plt.title('Quantized image (64 colors, Random)') plt.imshow(recreate_image(codebook_random, labels_random, w, h)) plt.show()
bsd-3-clause
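The recreate_image helper above walks every pixel in a Python loop; the same palette lookup can be done in one fancy-indexing step, which is the usual NumPy idiom for decoding a codebook. A short sketch, with a random codebook and labels standing in for the real k-means output:

import numpy as np

def recreate_image_vectorized(codebook, labels, w, h):
    """Same result as the loop version: look up each pixel's centroid, then restore (w, h, d)."""
    return codebook[labels].reshape(w, h, -1)

codebook = np.random.rand(64, 3)                    # 64 RGB centroids
labels = np.random.randint(0, 64, size=427 * 640)   # one code per pixel of a 427x640 image
img = recreate_image_vectorized(codebook, labels, 427, 640)
print(img.shape)                                    # (427, 640, 3)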
arabenjamin/scikit-learn
examples/linear_model/plot_ard.py
247
2622
""" ================================================== Automatic Relevance Determination Regression (ARD) ================================================== Fit regression model with Bayesian Ridge Regression. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. The histogram of the estimated weights is very peaked, as a sparsity-inducing prior is implied on the weights. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import ARDRegression, LinearRegression ############################################################################### # Generating simulated data with Gaussian weights # Parameters of the example np.random.seed(0) n_samples, n_features = 100, 100 # Create Gaussian data X = np.random.randn(n_samples, n_features) # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noite with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the ARD Regression clf = ARDRegression(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot the true weights, the estimated weights and the histogram of the # weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="ARD estimate") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.plot(w, 'g-', label="Ground truth") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
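As a quick complement to the record above, a hedged sketch of fitting `ARDRegression` on a small sparse-weight problem (the sizes and seed are assumptions chosen for speed):

```python
import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(50, 20)
w = np.zeros(20)
w[:3] = [1.0, -2.0, 0.5]            # only three features actually matter
y = X @ w + 0.1 * rng.randn(50)

ard = ARDRegression(compute_score=True).fit(X, y)
print(np.round(ard.coef_[:5], 2))   # irrelevant coefficients shrink toward zero
```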
will-cromar/needy
recent_trend.py
1
3087
from sklearn import linear_model from matplotlib import pyplot from price_parsing import * from xkcd import xkcdify from datetime import datetime DEFAULT_SAMPLES = 50 DEFAULT_LOOKBACK = 200 def extendGraphByN(X, n): """ Extend the domain of X by n :param X: The current domain in sklearn format :param n: The number of units (usually ordinal dates) to extend the domain by :return: Extended domain """ end = X[-1][0] + 1 # Starting point of extension extension = [[i] for i in range(end, end + n)] return X + extension def predictRecentTrend(X, y, samples): """ Creates a linear regression across recent datapoints :param X: The domain to feed into the regression model (sklearn format) :param y: The range to fit the regression model to (floats) :param samples: The number of days to use in the regression :return: Dataset representing the regression model """ # Get the most recent sample points X = [[date] for date in X[-samples:]] y = y[-samples:] # Create regressor and fit data reg = linear_model.LinearRegression() reg.fit(X, y) # Extend domain and predict values across it domain = extendGraphByN(X, samples) pred = reg.predict(domain) return domain, pred def graphRegression(ground_truth, regression): """ Graphs the ground-truth data against a regression model. :param ground_truth: (X, y) tuple representing the actual values :param regression: (X, y) tuple representing a prediction :return: None """ # Initialize the plot with XKCD themes pyplot.figure() xkcdify(pyplot) # Unpack the data from ground_truth dates = [datetime.fromordinal(date) for date in ground_truth[0]] # Convert dates from ordinal form prices = ground_truth[1] # Plot the ground_truth data pyplot.plot(dates, prices, "w-") # Plot regression model X = [date[0] for date in regression[0]] y = regression[1] pyplot.plot(X, y, "w--", linewidth=2) # Line is thicker than ground-truth # Label the axes pyplot.xlabel("Dates") pyplot.ylabel("Prices") def graphRecentTrend(ticker, samples=DEFAULT_SAMPLES, lookback=DEFAULT_LOOKBACK): """ Create a graph of a stock's recent trend. :param ticker: Company's ticker name :param samples: Number of samples to consider when graphing :param lookback: Number of previous points to include in graph :return: None """ # Grab the stock prices data = getStockPrices(ticker, frequency="daily") dates, prices = preprocessStocks(data[-lookback:]) # Pack the ground truth and the predicted values recentTrend = predictRecentTrend(dates, prices, samples) groundTruth = (dates, prices) # Graph the trend and save it graphRegression(groundTruth, recentTrend) pyplot.savefig(ticker + "linear.png", transparent=True)
mit
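`price_parsing` and `xkcd` in the record above are project-local modules; the sketch below restates the core extend-and-extrapolate step with only NumPy and scikit-learn, using a synthetic price series (the data and window length are assumptions):

```python
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
dates = np.arange(737000, 737100, dtype=float)         # 100 consecutive ordinal dates
prices = 0.5 * (dates - dates[0]) + rng.randn(100)      # noisy upward trend

samples = 50
X_recent = dates[-samples:].reshape(-1, 1)
reg = LinearRegression().fit(X_recent, prices[-samples:])

# Extend the domain by another `samples` days and predict across the whole window.
future = np.arange(dates[-1] + 1, dates[-1] + 1 + samples).reshape(-1, 1)
trend = reg.predict(np.vstack([X_recent, future]))
print(trend[0], trend[-1])                              # the fitted line keeps climbing
```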
richardgroves/namebench
nb_third_party/dns/node.py
215
5914
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS nodes. A node is a set of rdatasets.""" import StringIO import dns.rdataset import dns.rdatatype import dns.renderer class Node(object): """A DNS node. A node is a set of rdatasets @ivar rdatasets: the node's rdatasets @type rdatasets: list of dns.rdataset.Rdataset objects""" __slots__ = ['rdatasets'] def __init__(self): """Initialize a DNS node. """ self.rdatasets = []; def to_text(self, name, **kw): """Convert a node to text format. Each rdataset at the node is printed. Any keyword arguments to this method are passed on to the rdataset's to_text() method. @param name: the owner name of the rdatasets @type name: dns.name.Name object @rtype: string """ s = StringIO.StringIO() for rds in self.rdatasets: print >> s, rds.to_text(name, **kw) return s.getvalue()[:-1] def __repr__(self): return '<DNS node ' + str(id(self)) + '>' def __eq__(self, other): """Two nodes are equal if they have the same rdatasets. @rtype: bool """ # # This is inefficient. Good thing we don't need to do it much. # for rd in self.rdatasets: if rd not in other.rdatasets: return False for rd in other.rdatasets: if rd not in self.rdatasets: return False return True def __ne__(self, other): return not self.__eq__(other) def __len__(self): return len(self.rdatasets) def __iter__(self): return iter(self.rdatasets) def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE, create=False): """Find an rdataset matching the specified properties in the current node. @param rdclass: The class of the rdataset @type rdclass: int @param rdtype: The type of the rdataset @type rdtype: int @param covers: The covered type. Usually this value is dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or dns.rdatatype.RRSIG, then the covers value will be the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much easier to work with than if RRSIGs covering different rdata types were aggregated into a single RRSIG rdataset. @type covers: int @param create: If True, create the rdataset if it is not found. @type create: bool @raises KeyError: An rdataset of the desired type and class does not exist and I{create} is not True. @rtype: dns.rdataset.Rdataset object """ for rds in self.rdatasets: if rds.match(rdclass, rdtype, covers): return rds if not create: raise KeyError rds = dns.rdataset.Rdataset(rdclass, rdtype) self.rdatasets.append(rds) return rds def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE, create=False): """Get an rdataset matching the specified properties in the current node. None is returned if an rdataset of the specified type and class does not exist and I{create} is not True. 
@param rdclass: The class of the rdataset @type rdclass: int @param rdtype: The type of the rdataset @type rdtype: int @param covers: The covered type. @type covers: int @param create: If True, create the rdataset if it is not found. @type create: bool @rtype: dns.rdataset.Rdataset object or None """ try: rds = self.find_rdataset(rdclass, rdtype, covers, create) except KeyError: rds = None return rds def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE): """Delete the rdataset matching the specified properties in the current node. If a matching rdataset does not exist, it is not an error. @param rdclass: The class of the rdataset @type rdclass: int @param rdtype: The type of the rdataset @type rdtype: int @param covers: The covered type. @type covers: int """ rds = self.get_rdataset(rdclass, rdtype, covers) if not rds is None: self.rdatasets.remove(rds) def replace_rdataset(self, replacement): """Replace an rdataset. It is not an error if there is no rdataset matching I{replacement}. Ownership of the I{replacement} object is transferred to the node; in other words, this method does not store a copy of I{replacement} at the node, it stores I{replacement} itself. """ self.delete_rdataset(replacement.rdclass, replacement.rdtype, replacement.covers) self.rdatasets.append(replacement)
apache-2.0
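A small usage sketch of the `Node` API documented in the record above, written against a modern Python 3 dnspython rather than the vendored Python 2 copy; treat the `dns.rdataset.from_text` call and the import paths as assumptions about that newer API:

```python
import dns.name
import dns.node
import dns.rdataclass
import dns.rdataset
import dns.rdatatype

node = dns.node.Node()

# Attach an A rdataset to the node; the owner name is only needed when printing.
rds = dns.rdataset.from_text('IN', 'A', 300, '192.0.2.1', '192.0.2.2')
node.replace_rdataset(rds)

# Retrieve it again through the accessor documented above.
found = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
print(found is rds)                                     # True
print(node.to_text(dns.name.from_text('example.com.')))
```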
negrinho/deep_architect
examples/mnist_with_logging/main.py
1
2929
import deep_architect.utils as ut # Make sure that only one GPU is visible. if __name__ == '__main__': cfg = ut.get_config() if cfg['use_gpu']: import deep_architect.contrib.misc.gpu_utils as gpu_utils gpu_id = gpu_utils.get_available_gpu(0.1, 5.0) assert gpu_id is not None print("Using GPU %d" % gpu_id) gpu_utils.set_visible_gpus([gpu_id]) from deep_architect.contrib.misc.datasets.loaders import load_mnist from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn import deep_architect.search_logging as sl import deep_architect.visualization as vi import deep_architect.modules as mo import deep_architect.utils as ut from deep_architect.searchers.random import RandomSearcher def main(): # Loading the config file. cfg = ut.get_config() num_classes = 10 num_samples = cfg['num_samples'] # Loading the data. (Xtrain, ytrain, Xval, yval, Xtest, ytest) = load_mnist('data/mnist') train_dataset = InMemoryDataset(Xtrain, ytrain, True) val_dataset = InMemoryDataset(Xval, yval, False) test_dataset = InMemoryDataset(Xtest, ytest, False) # Creating the evaluator. evaluator = SimpleClassifierEvaluator( train_dataset, val_dataset, num_classes, './temp', max_eval_time_in_minutes=cfg['max_eval_time_in_minutes'], log_output_to_terminal=True, test_dataset=test_dataset) # Creating the search space. search_space_fn = lambda: css_dnn.dnn_net(num_classes) sl.create_search_folderpath( cfg["folderpath"], cfg["search_name"], delete_if_exists=cfg['delete_if_exists'], abort_if_exists=False, create_parent_folders=True) # Creating the searcher. searcher = RandomSearcher(search_space_fn) # Search loop. for evaluation_id in range(num_samples): eval_logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"], evaluation_id) if not eval_logger.config_exists(): inputs, outputs, hyperp_value_lst, eval_token = searcher.sample() results = evaluator.eval(inputs, outputs) # Logging the results (including the graph). eval_logger.log_config(hyperp_value_lst, eval_token) eval_logger.log_results(results) vi.draw_graph( outputs, draw_module_hyperparameter_info=True, print_to_screen=False, out_folderpath=eval_logger.get_evaluation_data_folderpath()) # Updating the searcher with the evaluation results. searcher.update(results['validation_accuracy'], eval_token) if __name__ == '__main__': main()
mit
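The search loop in the record above follows a generic sample → evaluate → log → update pattern; the sketch below restates that shape with hypothetical stand-ins (`RandomSearcherStub`, `evaluate`, and the results list are placeholders, not the deep_architect API):

```python
import random

class RandomSearcherStub:
    """Hypothetical stand-in that samples a configuration and accepts feedback."""
    def sample(self):
        return {"num_layers": random.choice([1, 2, 3])}
    def update(self, score, config):
        pass  # a real searcher could bias future samples toward well-scoring configs

def evaluate(config):
    # Placeholder: a real evaluator would train a model and report its metrics.
    return {"validation_accuracy": random.random()}

searcher = RandomSearcherStub()
log = []
for evaluation_id in range(4):
    config = searcher.sample()
    results = evaluate(config)
    log.append((evaluation_id, config, results))        # stands in for the EvaluationLogger
    searcher.update(results["validation_accuracy"], config)
print(log[0])
```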
heli522/scikit-learn
sklearn/tests/test_grid_search.py
83
28713
""" Testing for grid search module (sklearn.grid_search) """ from collections import Iterable, Sized from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.externals.six.moves import xrange from itertools import chain, product import pickle import sys import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.externals.six.moves import zip from sklearn.base import BaseEstimator from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV, ParameterGrid, ParameterSampler, ChangedBehaviorWarning) from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert_equal(list(grid), [grid[i] for i in range(len(grid))]) def test_parameter_grid(): # Test basic properties of ParameterGrid. 
params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert_true(isinstance(grid1, Iterable)) assert_true(isinstance(grid1, Sized)) assert_equal(len(grid1), 3) assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert_equal(len(grid2), 6) # loop to assert we can iterate over the grid multiple times for i in xrange(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert_equal(points, set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert_equal(len(empty), 1) assert_equal(list(empty), [{}]) assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert_equal(len(has_empty), 4) assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}]) assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert_equal(grid_search.best_estimator_.foo_param, 2) for i, foo_i in enumerate([1, 2, 3]): assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i}) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. 
clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert_equal(grid_search_no_score.best_params_, grid_search.best_params_) # check that we can call score and that it gives the correct result assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y)) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc').fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y) score_accuracy = assert_warns(ChangedBehaviorWarning, search_accuracy.score, X, y) score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score, X, y) score_auc = assert_warns(ChangedBehaviorWarning, search_auc.score, X, y) # ensure the test is sane assert_true(score_auc < 1.0) assert_true(score_accuracy < 1.0) assert_not_equal(score_auc, score_accuracy) assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_trivial_grid_scores(): # Test search over a "grid" with only one point. # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}) grid_search.fit(X, y) assert_true(hasattr(grid_search, "grid_scores_")) random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1) random_search.fit(X, y) assert_true(hasattr(random_search, "grid_scores_")) def test_no_refit(): # Test that grid search can be used for model selection only clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False) grid_search.fit(X, y) assert_true(hasattr(grid_search, "best_params_")) def test_grid_search_error(): # Test that grid search will capture errors on data with different # length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_iid(): # test the iid parameter # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. 
mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other svm = SVC(kernel='linear') # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] # once with iid=True (default) grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # for first split, 1/4 of dataset is in test, for second 3/4. # take weighted average assert_almost_equal(first.mean_validation_score, 1 * 1. / 4. + 1. / 3. * 3. / 4.) # once with iid=False grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv, iid=False) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) # scores are the same as above assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # averaged score is just mean of scores assert_almost_equal(first.mean_validation_score, np.mean(first.cv_validation_scores)) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC() cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_bad_param_grid(): param_dict = {"C": 1.0} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": np.ones(6).reshape(3, 2)} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_true(np.mean(y_pred == y_pred2) >= .9) assert_equal(C, C2) def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert_equal(C, C2) # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert_equal(C, C3) assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = 
make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert_true(cv.best_score_ >= 0) # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert_true(np.mean(y_pred == y_test) >= 0) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) def test_grid_search_precomputed_kernel_error_kernel_function(): # Test that grid search returns an error when using a kernel_function X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) kernel_function = lambda x1, x2: np.dot(x1, x2.T) clf = SVC(kernel=kernel_function) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_, y_) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert_true(not hasattr(self, 'has_been_fit_')) self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier(check_X=check_X, check_y=check_y) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_X=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_y=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert_true(hasattr(grid_search, "grid_scores_")) def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(random_state=0) km = KMeans(random_state=0) grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='adjusted_rand_score') grid_search.fit(X, y) # ARI can find the right number :) assert_equal(grid_search.best_params_["n_clusters"], 3) # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert_equal(grid_search.best_params_["n_clusters"], 4) def test_gridsearch_no_predict(): # test grid-search with an estimator without predict. 
# slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert_equal(search.best_params_['bandwidth'], .1) assert_equal(search.best_score_, 42) def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert_equal(len(samples), 10) for sample in samples: assert_true(sample["kernel"] in ["rbf", "linear"]) assert_true(0 <= sample["C"] <= 1) def test_randomized_search_grid_scores(): # Make a dataset with a lot of noise to get various kind of prediction # errors across CV folds and parameter settings X, y = make_classification(n_samples=200, n_features=100, n_informative=3, random_state=0) # XXX: as of today (scipy 0.12) it's not possible to set the random seed # of scipy.stats distributions: the assertions in this test should thus # not depend on the randomization params = dict(C=expon(scale=10), gamma=expon(scale=0.1)) n_cv_iter = 3 n_search_iter = 30 search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter, param_distributions=params, iid=False) search.fit(X, y) assert_equal(len(search.grid_scores_), n_search_iter) # Check consistency of the structure of each cv_score item for cv_score in search.grid_scores_: assert_equal(len(cv_score.cv_validation_scores), n_cv_iter) # Because we set iid to False, the mean_validation score is the # mean of the fold mean scores instead of the aggregate sample-wise # mean score assert_almost_equal(np.mean(cv_score.cv_validation_scores), cv_score.mean_validation_score) assert_equal(list(sorted(cv_score.parameters.keys())), list(sorted(params.keys()))) # Check the consistency with the best_score_ and best_params_ attributes sorted_grid_scores = list(sorted(search.grid_scores_, key=lambda x: x.mean_validation_score)) best_score = sorted_grid_scores[-1].mean_validation_score assert_equal(search.best_score_, best_score) tied_best_params = [s.parameters for s in sorted_grid_scores if s.mean_validation_score == best_score] assert_true(search.best_params_ in tied_best_params, "best_params_={0} is not part of the" " tied best models: {1}".format( search.best_params_, tied_best_params)) def test_grid_search_score_consistency(): # test that correct scores are used clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in ['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score) grid_search.fit(X, y) cv = StratifiedKFold(n_folds=3, y=y) for C, scores in zip(Cs, grid_search.grid_scores_): clf.set_params(C=C) scores = scores[2] # get the separate runs from grid scores i = 0 for train, test in cv: clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, scores[i]) i += 1 def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True) grid_search.fit(X, y) pickle.dumps(grid_search) # smoke test random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, 
refit=True, n_iter=3) random_search.fit(X, y) pickle.dumps(random_search) # smoke test def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold(y.shape[0], random_state=0) estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) for parameters, _, cv_validation_scores in grid_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) for parameters, _, cv_validation_scores in random_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert_false(hasattr(gs, "predict_proba")) def test_grid_search_allows_nans(): # Test GridSearchCV with Imputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. 
assert all(np.all(this_point.cv_validation_scores == 0.0) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) assert all(np.all(np.isnan(this_point.cv_validation_scores)) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise error if n_iter too large params = {'first': [0, 1], 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params, n_iter=7) assert_raises(ValueError, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=6) samples = list(sampler) assert_equal(len(samples), 6) for values in ParameterGrid(params): assert_true(values in samples) # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert_equal(len(samples), 99) hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert_equal(len(set(hashable_samples)), 99) # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert_equal(len(samples), 7)
bsd-3-clause
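The tests above target the long-removed `sklearn.grid_search` module; a minimal sketch of the equivalent call through the current API (the only substitution is the `sklearn.model_selection` import path):

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, n_features=10, random_state=0)

# Exhaustively try each C on 3-fold cross-validation and keep the best setting.
search = GridSearchCV(LinearSVC(), {"C": [0.1, 1.0, 10.0]}, scoring="accuracy", cv=3)
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))
```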
tensorflow/tensorflow-experimental_link_static_libraries_once
tensorflow/python/data/experimental/ops/distribute.py
9
15479
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distribution Strategy-related dataset transformations.""" from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops.options import ExternalStatePolicy from tensorflow.python.data.util import nest from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops from tensorflow.python.util.tf_export import tf_export SHARD_HINT = -1 tf_export("data.experimental.SHARD_HINT").export_constant( __name__, "SHARD_HINT") class _AutoShardDataset(dataset_ops.UnaryDataset): """A `Dataset` that shards the `Dataset` automatically. This dataset takes in an existing dataset and tries to automatically figure out how to shard the dataset in a multi-worker scenario using graph rewrites. If the AutoShardPolicy is set to FILE, it walks up the dataset graph until it finds a reader dataset, then inserts a ShardDataset op before that node so that each worker only sees some files. If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the end of the input pipeline, before any terminal PrefetchDataset if there is one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it is written to legacy RebatchDataset for correctness reasons, since RebatchDatasetV2 is incompatible with data sharding. If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding. If it cannot find a reader dataset, it falls back to doing data-based sharding. If the AutoShardPolicy is set to OFF, it does nothing. Attributes: num_workers: Total number of workers to shard this dataset across. index: The current worker index (out of the total number of workers) this dataset is for. num_replicas: The total number of replicas across all workers. This is used only when sharding by data (either DATA or AUTO) in order to rewrite RebatchDatasetV2 to RebatchDataset. Raises: NotFoundError: If we cannot find a suitable reader dataset to begin automatically sharding the dataset. 
""" def __init__(self, input_dataset, num_workers, index, num_replicas=None): self._input_dataset = input_dataset self._element_spec = input_dataset.element_spec variant_tensor = ged_ops.auto_shard_dataset( self._input_dataset._variant_tensor, # pylint: disable=protected-access num_workers=num_workers, index=index, auto_shard_policy=int( input_dataset.options().experimental_distribute.auto_shard_policy), num_replicas=num_replicas, **self._flat_structure) super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._element_spec def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None): # pylint: disable=invalid-name return dataset_ops.DatasetV1Adapter( _AutoShardDataset(input_dataset, num_workers, index, num_replicas)) class _LegacyRebatchDataset(dataset_ops.UnaryDataset): """A `Dataset` that divides its input batches into `num_replicas` sub-batches. For each batch in the input dataset, _LegacyRebatchDataset will produce `num_replicas` smaller batches whose sizes add up to the original batch size. For example: ```python ds = tf.data.Dataset.range(8) ds = ds.batch(4) ds = _LegacyRebatchDataset(ds, num_replicas=3) for elem in ds: print(elem) >> [0, 1], [2, 3], [], [4, 5], [6, 7], [] ``` """ def __init__(self, input_dataset, num_replicas): """Creates a _LegacyRebatchDataset. Args: input_dataset: `Dataset` to rebatch. num_replicas: A `tf.int64` scalar, representing the number of sub-batches to split each batch from `input_dataset` into. """ def recalculate_batch_size(type_spec): """Recalculates the output_shape after dividing it by num_replicas.""" output_shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access if not isinstance(output_shape, tensor_shape.TensorShape): return None # If the output shape is unknown, we set the batch dimension to unknown. if output_shape.rank is None: return None if len(output_shape) < 1: raise ValueError( "Invalid `input_dataset`. Expected a dataset whose elements " "have rank >= 1 but found a dataset whose elements are scalars. " "Fix the issue by adding the `batch` transformation to the " "dataset.") output_dims = [d.value for d in output_shape.dims] if output_dims[0] is not None and output_dims[0] % num_replicas == 0: return output_dims[0] // num_replicas # Set the batch dimension to unknown. If the global batch size does not # divide num_replicas evenly, the minibatches may have different sizes. return None def rebatch(type_spec): # pylint: disable=protected-access batch_size = recalculate_batch_size(type_spec) return type_spec._unbatch()._batch(batch_size) # pylint: enable=protected-access self._element_spec = nest.map_structure( rebatch, dataset_ops.get_structure(input_dataset)) # auto_shard rewrite assumes that there's normalize_to_dense before # rebatch_dataset. 
# LINT.IfChange input_dataset = dataset_ops.normalize_to_dense(input_dataset) variant_tensor = ged_ops.rebatch_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access num_replicas=num_replicas, **self._flat_structure) # LINT.ThenChange(//tensorflow/core/grappler/optimizers/data/auto_shard.cc) super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._element_spec class _RemoteDataset(dataset_ops.DatasetSource): """Creates a dataset on a given `device` given a graph def.""" def __init__(self, graph_def, device, element_spec): self._elem_spec = element_spec with ops.device(device): variant_tensor = ged_ops.dataset_from_graph(graph_def) super(_RemoteDataset, self).__init__(variant_tensor) @property def element_spec(self): return self._elem_spec def replicate(dataset, devices): """A transformation that replicates `dataset` onto a list of devices. Args: dataset: A `tf.data.Dataset` object. devices: A list of devices to replicate the dataset on. Returns: A dictionary mapping device name to a dataset on that device. """ if not isinstance(dataset, dataset_ops.DatasetV2): raise TypeError( f"Invalid `dataset`. Expected a `tf.data.Dataset` object but " f"got {type(dataset)}.") # pylint: disable=protected-access dataset_device = dataset._variant_tensor.device datasets = {} if len(devices) == 1 and devices[0] == dataset_device: datasets[devices[0]] = dataset return datasets with ops.colocate_with(dataset._variant_tensor): dataset = dataset._apply_debug_options() graph_def = dataset._as_serialized_graph( strip_device_assignment=True, external_state_policy=ExternalStatePolicy.WARN) for device in devices: ds = _RemoteDataset(graph_def, device, dataset.element_spec) datasets[device] = ds return datasets def batch_sizes_for_worker(global_batch_size, num_workers, num_replicas_per_worker, worker_index): """Determines how to rebatch a dataset for the given worker. Given the global batch size, number of workers, number of replicas per worker, and worker index, returns the correct batch sizes for rebatching a dataset on worker `worker_index` of `num_workers`, such that each global step (across all workers and replicas) will consume global_batch_size elements. The returned value should be passed as the `batch_sizes` input parameter to `tf.data.experimental.rebatch()`. The returned batch sizes meet the following constraints: Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker (A) for any worker, len(batch_sizes) = W * R (B) for any worker, sum(batch_sizes) == G (C) for any global step (i.e. R iterations on each worker), the sum of batches consumed by replicas across all workers is G. (D) any two batch sizes of any two replicas differs by at most one. 
For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two files which each contain 7 elements: ```python # WORKER 0 batch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size, num_workers=2, num_replicas_per_worker=2, worker_index=0) print(batch_sizes_0) >> [2, 2, 2, 1] dataset_0 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"]) dataset_0 = dataset_0.shard(num_shards, index=0) dataset_0 = dataset_0.batch(7) dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0)) for elem in dataset_0: print(elem) >> [[A0, A1], [A2, A3], [A4, A5], [A6]] # WORKER 1 batch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size, num_workers=2, num_replicas_per_worker=2, worker_index=1) print(batch_sizes_1) >> [2, 1, 2, 2] dataset_1 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"]) dataset_1 = dataset_1.shard(num_shards, index=1) dataset_1 = dataset_1.batch(7) dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1)) for elem in dataset_1: print(elem) >> [[B0, B1], [B2], [B3, B4], [B5, B6]] ``` The above example will produce the following elements: Step 1: Worker 0 Replica 0: [A0, A1] Worker 0 Replica 1: [A2, A3] Worker 1 Replica 0: [B0, B1] Worker 1 Replica 1: [B2] Total batch size = 7 Step 2: Worker 0 Replica 0: [A4, A5] Worker 0 Replica 1: [A6] Worker 1 Replica 0: [B3, B4] Worker 1 Replica 1: [B5, B6] Total batch size = 7 Args: global_batch_size: A `tf.int64` scalar, representing the global batch size. num_workers: An integer representing the number of workers the dataset will be distributed across. num_replicas_per_worker: An integer representing the number of replicas per worker. All workers are assumed to have the same number of replicas. worker_index: An integer index of the worker to be rebatched. Returns: A `tf.int64` vector, representing the batch sizes to rebatch the dataset into. """ # Constraint (A) num_subbatches = num_workers * num_replicas_per_worker offset = worker_index * num_replicas_per_worker const_value = tensor_util.constant_value(global_batch_size) if const_value is not None: # Use the constant global batch size for further calculations global_batch_size = const_value # Let N = W * R. Constraint (B) and (D) jointly mean that the iterations # should have batch size either floor(B/N) or ceil(B/N). Namely, of the N # subbatches a batch is split into, B - N * floor(B/N) of them will have size # ceil(B/N), and the rest will have size floor(B/N). floor = global_batch_size // num_subbatches num_ceil = global_batch_size - (num_subbatches * floor) # For worker 0, we assign the first num_ceil subbatches to have size # ceil(B/N), and the remainder to have size floor(B/N). The other workers will # each be offset by R * worker_index in order to meet constraint (C). if const_value is not None: # If the global batch size is a known constant value, we return a constant # tensor directly instead of manipulating it with TF ops. This allows for # better downstream shape inference. 
worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil) return ops.convert_to_tensor( worker_0[offset:] + worker_0[:offset], dtype=dtypes.int64, name="batch_sizes") worker_0 = array_ops.ones(num_subbatches, dtype=dtypes.int64) worker_0 = floor * worker_0 + array_ops.concat([ array_ops.ones(num_ceil, dtype=dtypes.int64), array_ops.zeros(num_subbatches - num_ceil, dtype=dtypes.int64) ], axis=0) return array_ops.concat([worker_0[offset:], worker_0[:offset]], axis=0) def compute_batch_size(dataset): """An operation that returns the batch size of the dataset. This op tries to infer the batch size statically by walking up the dataset tree from the final dataset node and returning the batch size of the first batching dataset (such as from .batch() and .padded_batch()) that it encounters. This differs from using the `element_spec` of a dataset in that it does not account for partial batches. This operation may fail if it encounters contradictory batch sizes (for example, if the dataset is created by zipping together two datasets with different batch sizes), if there are no explicit batching transformations, or if there are operations downstream from the batching transformation that may modify its batch size. In these cases, it returns a -1. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.int64` Tensor representing the batch size of the dataset sans partial batches. If this cannot be inferred statically, the value of this tensor will be -1. """ def get_static_batch_dim(type_spec): try: output_shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access except NotImplementedError: return None if not isinstance(output_shape, tensor_shape.TensorShape): return None if output_shape.rank is None: return None return output_shape.dims[0].value batch_dims = [ get_static_batch_dim(type_spec) for type_spec in nest.flatten(dataset_ops.get_structure(dataset)) ] if all(d is not None for d in batch_dims): if all(d == batch_dims[0] for d in batch_dims): # If all batch dimensions are known and equal, return that directly. batch_dim = batch_dims[0] else: # If all batch dimensions are known but not all equal, return -1. batch_dim = -1 return constant_op.constant( batch_dim, dtype=dtypes.int64, name="static_batch_size") # If any batch dimensions are unknown, use compute_batch_size op. return ged_ops.compute_batch_size(dataset._variant_tensor) # pylint: disable=protected-access _AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
apache-2.0
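The per-worker batch sizes computed by `batch_sizes_for_worker` can be checked without TensorFlow; this plain-Python sketch reproduces the floor/ceil split and per-worker rotation described in its docstring (an illustration, not the library function itself):

```python
def batch_sizes_for_worker_py(global_batch_size, num_workers,
                              num_replicas_per_worker, worker_index):
    # N sub-batches per global step; B - N * floor(B / N) of them get the larger size.
    num_subbatches = num_workers * num_replicas_per_worker
    floor = global_batch_size // num_subbatches
    num_ceil = global_batch_size - num_subbatches * floor
    worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil)
    # Each worker rotates the pattern by R * worker_index so every global step sums to B.
    offset = worker_index * num_replicas_per_worker
    return worker_0[offset:] + worker_0[:offset]

# Matches the G=7, W=2, R=2 example in the docstring above.
print(batch_sizes_for_worker_py(7, 2, 2, 0))  # [2, 2, 2, 1]
print(batch_sizes_for_worker_py(7, 2, 2, 1))  # [2, 1, 2, 2]
```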
YihaoLu/statsmodels
statsmodels/datasets/engel/data.py
25
1971
#! /usr/bin/env python """Engel (1857) food expenditure data.""" __docformat__ = 'restructuredtext' COPYRIGHT = """This is public domain.""" TITLE = """Engel (1857) food expenditure data""" SOURCE = """ This dataset was used in Koenker and Bassett (1982) and distributed alongside the ``quantreg`` package for R. Koenker, R. and Bassett, G. (1982) Robust Tests of Heteroscedasticity based on Regression Quantiles; Econometrica 50, 43-61. Roger Koenker (2012). quantreg: Quantile Regression. R package version 4.94. http://CRAN.R-project.org/package=quantreg """ DESCRSHORT = """Engel food expenditure data.""" DESCRLONG = """Data on income and food expenditure for 235 working-class households in 1857 Belgium.""" #suggested notes NOTE = """:: Number of observations: 235 Number of variables: 2 Variable name definitions: income - annual household income (Belgian francs) foodexp - annual household food expenditure (Belgian francs) """ import numpy as np from statsmodels.datasets import utils as du from os.path import dirname, abspath def load(): """ Load the data and return a Dataset class instance. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. """ data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float) def load_pandas(): data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=float) def _get_data(): filepath = dirname(abspath(__file__)) ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv ##### data = np.recfromtxt(open(filepath + '/engel.csv', 'rb'), delimiter=",", names=True, dtype=float) return data
bsd-3-clause
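A hedged sketch of loading the same Engel data through the public statsmodels interface (assuming a statsmodels release where `sm.datasets.engel` ships as in the module above):

```python
import statsmodels.api as sm

engel = sm.datasets.engel.load_pandas()
print(engel.data.shape)      # expected (235, 2): income and foodexp
print(engel.data.head())
```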
heli522/scikit-learn
examples/mixture/plot_gmm_pdf.py
282
1528
""" ============================================= Density Estimation for a mixture of Gaussians ============================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0., -0.7], [3.5, .7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GMM(n_components=2, covariance_type='full') clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20.0, 30.0) y = np.linspace(-20.0, 40.0) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX)[0] Z = Z.reshape(X.shape) CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)) CB = plt.colorbar(CS, shrink=0.8, extend='both') plt.scatter(X_train[:, 0], X_train[:, 1], .8) plt.title('Negative log-likelihood predicted by a GMM') plt.axis('tight') plt.show()
bsd-3-clause
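`mixture.GMM` in the record above has since been replaced by `GaussianMixture`; a minimal sketch of the same density estimate with the current estimator (`score_samples` now returns log-likelihoods directly, so the sign flip is explicit):

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
C = np.array([[0., -0.7], [3.5, 0.7]])
X_train = np.vstack([rng.randn(300, 2) + np.array([20, 20]),   # shifted blob
                     rng.randn(300, 2) @ C])                   # stretched blob

gmm = GaussianMixture(n_components=2, covariance_type='full').fit(X_train)
neg_log_density = -gmm.score_samples(X_train)   # analogue of -clf.score_samples(XX)[0]
print(np.round(neg_log_density[:3], 2))
```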
arabenjamin/scikit-learn
sklearn/neighbors/tests/test_dist_metrics.py
228
5234
import itertools import pickle import numpy as np from numpy.testing import assert_array_almost_equal import scipy from scipy.spatial.distance import cdist from sklearn.neighbors.dist_metrics import DistanceMetric from nose import SkipTest def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def cmp_version(version1, version2): version1 = tuple(map(int, version1.split('.')[:2])) version2 = tuple(map(int, version2.split('.')[:2])) if version1 < version2: return -1 elif version1 > version2: return 1 else: return 0 class TestMetrics: def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5, rseed=0, dtype=np.float64): np.random.seed(rseed) self.X1 = np.random.random((n1, d)).astype(dtype) self.X2 = np.random.random((n2, d)).astype(dtype) # make boolean arrays: ones and zeros self.X1_bool = self.X1.round(0) self.X2_bool = self.X2.round(0) V = np.random.random((d, d)) VI = np.dot(V, V.T) self.metrics = {'euclidean': {}, 'cityblock': {}, 'minkowski': dict(p=(1, 1.5, 2, 3)), 'chebyshev': {}, 'seuclidean': dict(V=(np.random.random(d),)), 'wminkowski': dict(p=(1, 1.5, 3), w=(np.random.random(d),)), 'mahalanobis': dict(VI=(VI,)), 'hamming': {}, 'canberra': {}, 'braycurtis': {}} self.bool_metrics = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def test_cdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X2, metric, **kwargs) yield self.check_cdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X2_bool, metric) yield self.check_cdist_bool, metric, D_true def check_cdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1, self.X2) assert_array_almost_equal(D12, D_true) def check_cdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool, self.X2_bool) assert_array_almost_equal(D12, D_true) def test_pdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X1, metric, **kwargs) yield self.check_pdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X1_bool, metric) yield self.check_pdist_bool, metric, D_true def check_pdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1) assert_array_almost_equal(D12, D_true) def check_pdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool) assert_array_almost_equal(D12, D_true) def test_haversine_metric(): def haversine_slow(x1, x2): return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2 + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2)) X = np.random.random((10, 2)) haversine = DistanceMetric.get_metric("haversine") D1 = haversine.pairwise(X) D2 = np.zeros_like(D1) for i, x1 in enumerate(X): for j, x2 in enumerate(X): D2[i, j] = haversine_slow(x1, x2) assert_array_almost_equal(D1, D2) assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * 
D2) ** 2) def test_pyfunc_metric(): X = np.random.random((10, 3)) euclidean = DistanceMetric.get_metric("euclidean") pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2) # Check if both callable metric and predefined metric initialized # DistanceMetric object is picklable euclidean_pkl = pickle.loads(pickle.dumps(euclidean)) pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc)) D1 = euclidean.pairwise(X) D2 = pyfunc.pairwise(X) D1_pkl = euclidean_pkl.pairwise(X) D2_pkl = pyfunc_pkl.pairwise(X) assert_array_almost_equal(D1, D2) assert_array_almost_equal(D1_pkl, D2_pkl)
bsd-3-clause
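The test file above exercises sklearn's DistanceMetric against scipy.spatial.distance.cdist, including user-supplied metrics registered through the "pyfunc" mechanism. A minimal sketch of the same comparison, assuming the public sklearn.neighbors.DistanceMetric import (the tests reach into the private dist_metrics module) and a hypothetical Minkowski-style callable:

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.neighbors import DistanceMetric  # public import path of that era


def minkowski_like(x1, x2, p=2):
    # hypothetical user metric, mirroring dist_func in the tests above
    return np.sum(np.abs(x1 - x2) ** p) ** (1.0 / p)


X = np.random.RandomState(0).random_sample((5, 3))

# built-in metric: pairwise() should agree with scipy's cdist
euclidean = DistanceMetric.get_metric("euclidean")
assert np.allclose(euclidean.pairwise(X), cdist(X, X, "euclidean"))

# user-defined metric wrapped by the "pyfunc" machinery, extra kwargs are
# forwarded to the callable just as in the test file above
pyfunc = DistanceMetric.get_metric("pyfunc", func=minkowski_like, p=2)
assert np.allclose(pyfunc.pairwise(X), cdist(X, X, "minkowski", p=2))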
heli522/scikit-learn
sklearn/linear_model/tests/test_theil_sen.py
233
9928
""" Testing for Theil-Sen module (sklearn.linear_model.theil_sen) """ # Author: Florian Wilhelm <florian.wilhelm@gmail.com> # License: BSD 3 clause from __future__ import division, print_function, absolute_import import os import sys from contextlib import contextmanager import numpy as np from numpy.testing import assert_array_equal, assert_array_less from numpy.testing import assert_array_almost_equal, assert_warns from scipy.linalg import norm from scipy.optimize import fmin_bfgs from nose.tools import raises, assert_almost_equal from sklearn.utils import ConvergenceWarning from sklearn.linear_model import LinearRegression, TheilSenRegressor from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point from sklearn.linear_model.theil_sen import _modified_weiszfeld_step from sklearn.utils.testing import assert_greater, assert_less @contextmanager def no_stdout_stderr(): old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = open(os.devnull, 'w') sys.stderr = open(os.devnull, 'w') yield sys.stdout.flush() sys.stderr.flush() sys.stdout = old_stdout sys.stderr = old_stderr def gen_toy_problem_1d(intercept=True): random_state = np.random.RandomState(0) # Linear model y = 3*x + N(2, 0.1**2) w = 3. if intercept: c = 2. n_samples = 50 else: c = 0.1 n_samples = 100 x = random_state.normal(size=n_samples) noise = 0.1 * random_state.normal(size=n_samples) y = w * x + c + noise # Add some outliers if intercept: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[33], y[33] = (2.5, 1) x[49], y[49] = (2.1, 2) else: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[53], y[53] = (2.5, 1) x[60], y[60] = (2.1, 2) x[72], y[72] = (1.8, -7) return x[:, np.newaxis], y, w, c def gen_toy_problem_2d(): random_state = np.random.RandomState(0) n_samples = 100 # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 2)) w = np.array([5., 10.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def gen_toy_problem_4d(): random_state = np.random.RandomState(0) n_samples = 10000 # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 4)) w = np.array([5., 10., 42., 7.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def test_modweiszfeld_step_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) # Check startvalue is element of X and solution median = 2. new_y = _modified_weiszfeld_step(X, median) assert_array_almost_equal(new_y, median) # Check startvalue is not the solution y = 2.5 new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check startvalue is not the solution but element of X y = 3. 
new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check that a single vector is identity X = np.array([1., 2., 3.]).reshape(1, 3) y = X[0, ] new_y = _modified_weiszfeld_step(X, y) assert_array_equal(y, new_y) def test_modweiszfeld_step_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) y = np.array([0.5, 0.5]) # Check first two iterations new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) new_y = _modified_weiszfeld_step(X, new_y) assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) # Check fix point y = np.array([0.21132505, 0.78867497]) new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, y) def test_spatial_median_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) true_median = 2. _, median = _spatial_median(X) assert_array_almost_equal(median, true_median) # Test larger problem and for exact solution in 1d case random_state = np.random.RandomState(0) X = random_state.randint(100, size=(1000, 1)) true_median = np.median(X.ravel()) _, median = _spatial_median(X) assert_array_equal(median, true_median) def test_spatial_median_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) _, median = _spatial_median(X, max_iter=100, tol=1.e-6) def cost_func(y): dists = np.array([norm(x - y) for x in X]) return np.sum(dists) # Check if median is solution of the Fermat-Weber location problem fermat_weber = fmin_bfgs(cost_func, median, disp=False) assert_array_almost_equal(median, fermat_weber) # Check when maximum iteration is exceeded a warning is emitted assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.) def test_theil_sen_1d(): X, y, w, c = gen_toy_problem_1d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(np.abs(lstq.coef_ - w), 0.9) # Check that Theil-Sen works theil_sen = TheilSenRegressor(random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_theil_sen_1d_no_intercept(): X, y, w, c = gen_toy_problem_1d(intercept=False) # Check that Least Squares fails lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_greater(np.abs(lstq.coef_ - w - c), 0.5) # Check that Theil-Sen works theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w + c, 1) assert_almost_equal(theil_sen.intercept_, 0.) 
def test_theil_sen_2d(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_calc_breakdown_point(): bp = _breakdown_point(1e10, 2) assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6) @raises(ValueError) def test_checksubparams_negative_subpopulation(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_few_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_many_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_n_subsamples_if_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y) def test_subpopulation(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_subsamples(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) lstq = LinearRegression().fit(X, y) # Check for exact the same results as Least Squares assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) def test_verbosity(): X, y, w, c = gen_toy_problem_1d() # Check that Theil-Sen can be verbose with no_stdout_stderr(): TheilSenRegressor(verbose=True, random_state=0).fit(X, y) TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) def test_theil_sen_parallel(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(n_jobs=-1, random_state=0, max_subpopulation=2e3).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) # Check that Theil-Sen falls back to Least Squares if fit_intercept=False theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) # Check fit_intercept=True case. This will not be equal to the Least # Squares solution since the intercept is calculated differently. theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) y_pred = theil_sen.predict(X) assert_array_almost_equal(y_pred, y, 12)
bsd-3-clause
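The Theil-Sen tests above repeatedly contrast TheilSenRegressor with ordinary least squares on toy data corrupted by a handful of outliers. A minimal sketch of that robustness check, assuming only the public sklearn.linear_model API and made-up leverage points:

import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor

rng = np.random.RandomState(0)
n_samples = 100
x = rng.normal(size=n_samples)
y = 3.0 * x + 2.0 + 0.1 * rng.normal(size=n_samples)

# inject a few leverage outliers, in the spirit of gen_toy_problem_1d above
x[:5] = 5.0
y[:5] = -10.0

X = x[:, np.newaxis]
lstq = LinearRegression().fit(X, y)
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)

# least squares is expected to be dragged toward the outliers, while
# Theil-Sen should stay close to the true slope of 3 and intercept of 2
print(lstq.coef_, lstq.intercept_)
print(theil_sen.coef_, theil_sen.intercept_)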
arabenjamin/scikit-learn
sklearn/preprocessing/tests/test_imputation.py
212
11911
import numpy as np from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.preprocessing.imputation import Imputer from sklearn.pipeline import Pipeline from sklearn import grid_search from sklearn import tree from sklearn.random_projection import sparse_random_matrix def _check_statistics(X, X_true, strategy, statistics, missing_values): """Utility function for testing imputation for a given strategy. Test: - along the two axes - with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, " \ "axis = {0}, sparse = {1}" % (strategy, missing_values) # Normal matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) X_trans = imputer.fit(X).transform(X.copy()) assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, False)) assert_array_equal(X_trans, X_true, err_msg.format(0, False)) # Normal matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(X.transpose()) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, X.copy().transpose()) else: X_trans = imputer.transform(X.copy().transpose()) assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, False)) # Sparse matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) imputer.fit(sparse.csc_matrix(X)) X_trans = imputer.transform(sparse.csc_matrix(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, True)) assert_array_equal(X_trans, X_true, err_msg.format(0, True)) # Sparse matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(sparse.csc_matrix(X.transpose())) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, sparse.csc_matrix(X.copy().transpose())) else: X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, True)) def test_imputation_shape(): # Verify the shapes of the imputed matrix for different strategies. X = np.random.randn(10, 2) X[::2] = np.nan for strategy in ['mean', 'median', 'most_frequent']: imputer = Imputer(strategy=strategy) X_imputed = imputer.fit_transform(X) assert_equal(X_imputed.shape, (10, 2)) X_imputed = imputer.fit_transform(sparse.csr_matrix(X)) assert_equal(X_imputed.shape, (10, 2)) def test_imputation_mean_median_only_zero(): # Test imputation using the mean and median strategies, when # missing_values == 0. X = np.array([ [np.nan, 0, 0, 0, 5], [np.nan, 1, 0, np.nan, 3], [np.nan, 2, 0, 0, 0], [np.nan, 6, 0, 5, 13], ]) X_imputed_mean = np.array([ [3, 5], [1, 3], [2, 7], [6, 13], ]) statistics_mean = [np.nan, 3, np.nan, np.nan, 7] # Behaviour of median with NaN is undefined, e.g. 
different results in # np.median and np.ma.median X_for_median = X[:, [0, 1, 2, 4]] X_imputed_median = np.array([ [2, 5], [1, 3], [2, 5], [6, 13], ]) statistics_median = [np.nan, 2, np.nan, 5] _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0) _check_statistics(X_for_median, X_imputed_median, "median", statistics_median, 0) def test_imputation_mean_median(): # Test imputation using the mean and median strategies, when # missing_values != 0. rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0]+1) values[4::2] = - values[4::2] tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))), ("mean", 0, lambda z, v, p: np.mean(v)), ("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))), ("median", 0, lambda z, v, p: np.median(v))] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: X_true[:, j] = np.hstack((v, np.repeat( true_statistics[j], nb_missing_values + nb_zeros))) else: X_true[:, j] = np.hstack((v, z, np.repeat(true_statistics[j], nb_missing_values))) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics(X, X_true, strategy, true_statistics, test_missing_values) def test_imputation_median_special_cases(): # Test median imputation with sparse boundary cases X = np.array([ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ]).transpose() X_imputed_median = np.array([ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, .5], ]).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5] _check_statistics(X, X_imputed_median, "median", statistics_median, 'NaN') def test_imputation_most_frequent(): # Test imputation using the most-frequent strategy. X = np.array([ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ]) X_true = np.array([ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ]) # scipy.stats.mode, used in Imputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. 
When this # test will fail after an update of scipy, Imputer will need to be updated # to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1) def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. pipeline = Pipeline([('imputer', Imputer(missing_values=0)), ('tree', tree.DecisionTreeRegressor(random_state=0))]) parameters = { 'imputer__strategy': ["mean", "median", "most_frequent"], 'imputer__axis': [0, 1] } l = 100 X = sparse_random_matrix(l, l, density=0.10) Y = sparse_random_matrix(l, 1, density=0.10).toarray() gs = grid_search.GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_pickle(): # Test for pickling imputers. import pickle l = 100 X = sparse_random_matrix(l, l, density=0.10) for strategy in ["mean", "median", "most_frequent"]: imputer = Imputer(missing_values=0, strategy=strategy) imputer.fit(X) imputer_pickled = pickle.loads(pickle.dumps(imputer)) assert_array_equal(imputer.transform(X.copy()), imputer_pickled.transform(X.copy()), "Fail to transform the data after pickling " "(strategy = %s)" % (strategy)) def test_imputation_copy(): # Test imputation with copy X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_false(np.all(X == Xt)) # copy=True, sparse csr => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_true(np.all(X == Xt)) # copy=False, sparse csr, axis=1 => no copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_true(np.all(X.data == Xt.data)) # copy=False, sparse csc, axis=0 => no copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_true(np.all(X.data == Xt.data)) # copy=False, sparse csr, axis=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csc, axis=1 => copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csr, axis=1, missing_values=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=0, strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) assert_false(sparse.issparse(Xt)) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False.
bsd-3-clause
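The imputation tests above revolve around the Imputer transformer and its strategy, axis and missing_values parameters. A minimal usage sketch, assuming the legacy sklearn.preprocessing.Imputer API exercised above (it was later replaced by SimpleImputer):

import numpy as np
from sklearn.preprocessing import Imputer  # legacy API used by the tests above

X = np.array([[1.0, 2.0],
              [np.nan, 3.0],
              [7.0, np.nan]])

# column statistics are learned on fit() and substituted on transform()
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
X_imputed = imputer.fit_transform(X)

print(imputer.statistics_)  # per-column means: [4.0, 2.5]
print(X_imputed)            # NaNs replaced by the corresponding column mean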
deepgram/kur
kur/loss/ctc.py
1
10464
""" Copyright 2016 Deepgram Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging import numpy from . import Loss from ..sources import RepeatSource, DerivedSource from ..engine import PassthroughEngine from ..utils import can_import logger = logging.getLogger(__name__) ############################################################################### class ScaledSource(DerivedSource): """ Derived source which scales `input_length` by the same amount that the length of `scale_this` is scaled down to `target`. """ ########################################################################### def __init__(self, model, relative_to, to_this, scale_this): super().__init__() self.model = model self.relative_to = relative_to self.to_this = to_this self.scale_this = scale_this self.normal_shape = None ############################################################### def derive(self, inputs): # Break it apart sizes, = inputs if sizes.ndim < 2: sizes = numpy.expand_dims(sizes, -1) outputs = numpy.array( [ self.model.get_shape_at_layer( name=self.to_this, assumptions={ self.relative_to : \ (x[0], ) + tuple(self.normal_shape[1:]) } )[0] for x in sizes ], dtype='int32' ) return numpy.expand_dims(outputs, axis=1) ############################################################### def setup(self): self.normal_shape = self.model.get_shape_at_layer( name=self.relative_to ) ############################################################### def shape(self): return (None, 1) ############################################################### def requires(self): return (self.scale_this, ) ################################################################### class FlattenSource(DerivedSource): """ Derived source which converts a rectangular array of labels (one for each sample, and all padded to the same width), along with the label lengths (the number of non- padded entries in each sample) and produces a single, long, flattened list of labels. """ ############################################################### def __init__(self, label, label_length): super().__init__() self.label = label self.label_length = label_length ############################################################### def derive(self, inputs): # Break it apart labels, label_lengths = inputs outputs = numpy.array([ x for label, label_length in zip(labels, label_lengths) for x in label[:label_length[0]] ]) # Warp-CTC is strange: it uses a variable-length vector of # flattened labels. That doesn't look much like a deep # learning tensor! So we can cheat without imposing much of # a memory hit. 
outputs = numpy.lib.stride_tricks.as_strided( outputs, shape=(len(labels), ) + outputs.shape, strides=(0, ) + outputs.strides ) return outputs ############################################################### def shape(self): return (None,) ############################################################### def requires(self): return (self.label, self.label_length) ############################################################################### class Ctc(Loss): """ Connectionist Temporal Classification loss function """ KNOWN_VARIANTS = {'warp', 'loss_scale'} ########################################################################### def __init__(self, input_length, output_length, output, relative_to=None, variant=None, **kwargs): """ Creates a new CTC loss function. # Arguments """ super().__init__(**kwargs) if variant is None: variant = set() elif isinstance(variant, str): variant = {variant} elif isinstance(variant, (list, tuple, set)): variant = set(variant) else: raise ValueError('Unexpected or unsupport CTC variant type: {}' .format(variant)) variant.discard(None) for x in variant: if x not in Ctc.KNOWN_VARIANTS: logger.warning('Ignoring an unknown variant to the CTC loss ' 'function: %s', x) self.variant = variant self.input_length = input_length self.output_length = output_length self.output = output self.relative_to = relative_to ########################################################################### def get_loss(self, model, target, output): """ Returns the loss function that can be used by the implementation- specific model. """ backend = model.get_backend() if backend.get_name() == 'keras': import keras.backend as K if 'warp' in self.variant: # Just use the built-in Keras CTC loss function. logger.info('Attaching Warp-CTC loss function to model ' 'output "%s".', target) if backend.get_toolchain() != 'theano': logger.error('If you want to use warp-ctc, you need to ' 'use the Theano backend to Keras.') raise ValueError('Warp-CTC is currently only supported ' 'with the Theano backend to Keras.') else: # Just use the built-in Keras CTC loss function. logger.debug('Attaching built-in Keras CTC loss function to ' 'model output "%s".', target) ctc_scaled = 'ctc_scaled_{}'.format(self.input_length) flattened_labels = 'ctc_flattened_labels_{}'.format(target) transcript_length = K.placeholder( ndim=2, dtype='int32', name=self.output_length ) transcript = K.placeholder( ndim=2, dtype='int32', name=flattened_labels if 'warp' in self.variant \ else self.output ) utterance_length = K.placeholder( ndim=2, dtype='int32', name=self.input_length if self.relative_to is None \ else ctc_scaled ) if self.relative_to is not None: model.add_data_source( ctc_scaled, ScaledSource( model, relative_to=self.relative_to, to_this=target, scale_this=self.input_length ) ) if 'warp' in self.variant: model.add_data_source( flattened_labels, FlattenSource( self.output, self.output_length ) ) try: import ctc # pylint: disable=import-error except ImportError: logger.error('The warp-CTC loss function was requested, ' 'but we cannot find the "ctc" library. 
See our ' 'troubleshooting page for helpful tips.') raise ImportError('Cannot find the "ctc" library, which ' 'is needed when using the "warp" variant of the CTC ' 'loss function.') out = ctc.cpu_ctc_th( output.dimshuffle((1, 0, 2)), K.squeeze(utterance_length, -1), transcript[0]+1, K.squeeze(transcript_length, -1) ) else: out = K.ctc_batch_cost( transcript, output, utterance_length, transcript_length ) if 'loss_scale' in self.variant: logger.debug('Loss scaling is active.') out = out * K.mean( K.cast(utterance_length, K.dtype(out)) ) / 100 return ( ( (self.output_length, transcript_length), (flattened_labels if 'warp' in self.variant \ else self.output, transcript), (self.input_length if self.relative_to is None \ else ctc_scaled, utterance_length) ), out ) elif backend.get_name() == 'pytorch': if 'warp' not in self.variant: logger.error('PyTorch does not include a native CTC loss ' 'function yet. However, PyTorch bindings to Warp CTC are ' 'available (SeanNaren/warp-ctc). Try installing that, and ' 'then settings variant=warp.') raise ValueError('Only Warp CTC is supported for PyTorch ' 'right now.') ctc_scaled = 'ctc_scaled_{}'.format(self.input_length) flattened_labels = 'ctc_flattened_labels_{}'.format(target) transcript_length = model.data.placeholder( self.output_length, location='cpu', data_type='int' ) transcript = model.data.placeholder( flattened_labels, location='cpu', data_type='int' ) utterance_length = model.data.placeholder( self.input_length if self.relative_to is None else ctc_scaled, location='cpu', data_type='int' ) if self.relative_to is not None: model.add_data_source( ctc_scaled, ScaledSource( model, relative_to=self.relative_to, to_this=target, scale_this=self.input_length ) ) if 'warp' in self.variant: model.add_data_source( flattened_labels, FlattenSource( self.output, self.output_length ) ) try: from warpctc_pytorch import CTCLoss # pytorch: disable=import-error except ImportError: logger.error('The warp-CTC loss function was requested, ' 'but we cannot find the "warpctc_pytorch" library. See ' 'out troubleshooting page for helpful tips.') raise ImportError('Cannot find the "warpctc_pytorch" library, ' 'which is needed when using the "warp" variant of the CTC ' 'loss function.') loss = model.data.move(CTCLoss()) def basic_ctc_loss(inputs, output): """ Computes CTC loss. """ return loss( output.transpose(1, 0).contiguous(), inputs[0][0]+1, # transcript[0]+1 inputs[1].squeeze(1), # K.squeeze(utterance_length, -1), inputs[2].squeeze(1) # K.squeeze(transcript_length, -1) ) / output.size(0) if 'loss_scale' in self.variant: logger.debug('Loss scaling is active.') def loss_scale(inputs, output): """ Computes CTC loss. """ factor = inputs[1].float().mean().data[0] / 100. return basic_ctc_loss(inputs, output) * factor get_ctc_loss = loss_scale else: get_ctc_loss = basic_ctc_loss return [ [ (flattened_labels if 'warp' in self.variant \ else self.output, transcript), (self.input_length if self.relative_to is None \ else ctc_scaled, utterance_length), (self.output_length, transcript_length) ], get_ctc_loss ] else: raise ValueError('Unsupported backend "{}" for loss function "{}"' .format(backend.get_name(), self.get_name())) ### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
apache-2.0
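FlattenSource in the loss module above turns a padded, rectangular label batch plus per-sample label lengths into the single flat label vector that warp-CTC consumes. A standalone numpy sketch of that conversion, with toy values not taken from any real dataset:

import numpy as np

# padded label matrix (3 transcripts, padded to width 4) and the per-sample
# label lengths, in the shape FlattenSource.derive() receives them
labels = np.array([[3, 7, 2, 0],
                   [5, 1, 0, 0],
                   [9, 9, 4, 4]])
label_lengths = np.array([[3], [2], [4]])

# keep only the real (non-padded) entries and concatenate them
flattened = np.array([
    x
    for label, length in zip(labels, label_lengths)
    for x in label[:length[0]]
])
print(flattened)  # [3 7 2 5 1 9 9 4 4]

The derive() method above additionally tiles this vector across the batch dimension with a zero-stride view, so it fits the framework's tensor plumbing without copying memory.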
FredLoney/nipype
nipype/interfaces/slicer/diffusion/diffusion.py
15
23386
# -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath import os class ResampleDTIVolumeInputSpec(CommandLineInputSpec): inputVolume = File(position=-2, desc="Input volume to be resampled", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Resampled Volume", argstr="%s") Reference = File(desc="Reference Volume (spacing,size,orientation,origin)", exists=True, argstr="--Reference %s") transformationFile = File(exists=True, argstr="--transformationFile %s") defField = File(desc="File containing the deformation field (3D vector image containing vectors with 3 components)", exists=True, argstr="--defField %s") hfieldtype = traits.Enum("displacement", "h-Field", desc="Set if the deformation field is an -Field", argstr="--hfieldtype %s") interpolation = traits.Enum("linear", "nn", "ws", "bs", desc="Sampling algorithm (linear , nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", argstr="--interpolation %s") correction = traits.Enum("zero", "none", "abs", "nearest", desc="Correct the tensors if computed tensor is not semi-definite positive", argstr="--correction %s") transform_tensor_method = traits.Enum("PPD", "FS", desc="Chooses between 2 methods to transform the tensors: Finite Strain (FS), faster but less accurate, or Preservation of the Principal Direction (PPD)", argstr="--transform_tensor_method %s") transform_order = traits.Enum("input-to-output", "output-to-input", desc="Select in what order the transforms are read", argstr="--transform_order %s") notbulk = traits.Bool(desc="The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", argstr="--notbulk ") spaceChange = traits.Bool(desc="Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", argstr="--spaceChange ") rotation_point = traits.List(desc="Center of rotation (only for rigid and affine transforms)", argstr="--rotation_point %s") centered_transform = traits.Bool(desc="Set the center of the transformation to the center of the input image (only for rigid and affine transforms)", argstr="--centered_transform ") image_center = traits.Enum("input", "output", desc="Image to use to center the transform (used only if \'Centered Transform\' is selected)", argstr="--image_center %s") Inverse_ITK_Transformation = traits.Bool(desc="Inverse the transformation before applying it from output image to input image (only for rigid and affine transforms)", argstr="--Inverse_ITK_Transformation ") spacing = InputMultiPath(traits.Float, desc="Spacing along each dimension (0 means use input spacing)", sep=",", argstr="--spacing %s") size = InputMultiPath(traits.Float, desc="Size along each dimension (0 means use input size)", sep=",", argstr="--size %s") origin = traits.List(desc="Origin of the output Image", argstr="--origin %s") direction_matrix = InputMultiPath(traits.Float, desc="9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", sep=",", argstr="--direction_matrix %s") number_of_thread = traits.Int(desc="Number of thread used to compute the output image", argstr="--number_of_thread %d") default_pixel_value = 
traits.Float(desc="Default pixel value for samples falling outside of the input region", argstr="--default_pixel_value %f") window_function = traits.Enum("h", "c", "w", "l", "b", desc="Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", argstr="--window_function %s") spline_order = traits.Int(desc="Spline Order (Spline order may be from 0 to 5)", argstr="--spline_order %d") transform_matrix = InputMultiPath(traits.Float, desc="12 parameters of the transform matrix by rows ( --last 3 being translation-- )", sep=",", argstr="--transform_matrix %s") transform = traits.Enum("rt", "a", desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", argstr="--transform %s") class ResampleDTIVolumeOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Resampled Volume", exists=True) class ResampleDTIVolume(SEMLikeCommandLine): """title: Resample DTI Volume category: Diffusion.Diffusion Tensor Images description: Resampling an image is a very important task in image analysis. It is especially important in the frame of image registration. This module implements DT image resampling through the use of itk Transforms. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. version: 0.1 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleDTI contributor: Francois Budin (UNC) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics """ input_spec = ResampleDTIVolumeInputSpec output_spec = ResampleDTIVolumeOutputSpec _cmd = "ResampleDTIVolume " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class DWIRicianLMMSEFilterInputSpec(CommandLineInputSpec): iter = traits.Int(desc="Number of iterations for the noise removal filter.", argstr="--iter %d") re = InputMultiPath(traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") mnvf = traits.Int(desc="Minimum number of voxels in kernel used for filtering.", argstr="--mnvf %d") mnve = traits.Int(desc="Minimum number of voxels in kernel used for estimation.", argstr="--mnve %d") minnstd = traits.Int(desc="Minimum allowed noise standard deviation.", argstr="--minnstd %d") maxnstd = traits.Int(desc="Maximum allowed noise standard deviation.", argstr="--maxnstd %d") hrf = traits.Float(desc="How many histogram bins per unit interval.", argstr="--hrf %f") uav = traits.Bool(desc="Use absolute value in case of negative square.", argstr="--uav ") inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", argstr="%s") compressOutput = traits.Bool(desc="Compress the data of the compressed file using gzip", argstr="--compressOutput ") class DWIRicianLMMSEFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) class DWIRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Rician LMMSE Filter category: Diffusion.Diffusion Weighted Images description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. Images corresponding to each gradient direction, including baseline, are processed individually. The noise parameter is automatically estimated (noise estimation improved but slower). Note that this is a general purpose filter for MRi images. The module jointLMMSE has been specifically designed for DWI volumes and shows a better performance, so its use is recommended instead. A complete description of the algorithm in this module can be found in: S. Aja-Fernandez, M. Niethammer, M. Kubicki, M. Shenton, and C.-F. Westin. Restoration of DWI data using a Rician LMMSE estimator. IEEE Transactions on Medical Imaging, 27(10): pp. 1389-1403, Oct. 2008. version: 0.1.1.$Revision: 1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RicianLMMSEImageFilter contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa), Marc Niethammer (UNC) acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). 
""" input_spec = DWIRicianLMMSEFilterInputSpec output_spec = DWIRicianLMMSEFilterOutputSpec _cmd = "DWIRicianLMMSEFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class TractographyLabelMapSeedingInputSpec(CommandLineInputSpec): InputVolume = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") inputroi = File(desc="Label map with seeding ROIs", exists=True, argstr="--inputroi %s") OutputFibers = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Tractography result", argstr="%s") useindexspace = traits.Bool(desc="Seed at IJK voxel grid", argstr="--useindexspace ") seedspacing = traits.Float(desc="Spacing (in mm) between seed points, only matters if use Use Index Space is off", argstr="--seedspacing %f") randomgrid = traits.Bool(desc="Enable random placing of seeds", argstr="--randomgrid ") clthreshold = traits.Float(desc="Minimum Linear Measure for the seeding to start.", argstr="--clthreshold %f") minimumlength = traits.Float(desc="Minimum length of the fibers (in mm)", argstr="--minimumlength %f") maximumlength = traits.Float(desc="Maximum length of fibers (in mm)", argstr="--maximumlength %f") stoppingmode = traits.Enum("LinearMeasure", "FractionalAnisotropy", desc="Tensor measurement used to stop the tractography", argstr="--stoppingmode %s") stoppingvalue = traits.Float(desc="Tractography will stop when the stopping measurement drops below this value", argstr="--stoppingvalue %f") stoppingcurvature = traits.Float(desc="Tractography will stop if radius of curvature becomes smaller than this number units are degrees per mm", argstr="--stoppingcurvature %f") integrationsteplength = traits.Float(desc="Distance between points on the same fiber in mm", argstr="--integrationsteplength %f") label = traits.Int(desc="Label value that defines seeding region.", argstr="--label %d") writetofile = traits.Bool(desc="Write fibers to disk or create in the scene?", argstr="--writetofile ") outputdirectory = traits.Either(traits.Bool, Directory(), hash_files=False, desc="Directory in which to save fiber(s)", argstr="--outputdirectory %s") name = traits.Str(desc="Name to use for fiber files", argstr="--name %s") class TractographyLabelMapSeedingOutputSpec(TraitedSpec): OutputFibers = File(position=-1, desc="Tractography result", exists=True) outputdirectory = Directory(desc="Directory in which to save fiber(s)", exists=True) class TractographyLabelMapSeeding(SEMLikeCommandLine): """title: Tractography Label Map Seeding category: Diffusion.Diffusion Tensor Images description: Seed tracts on a Diffusion Tensor Image (DT) from a label map version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Seeding license: slicer3 contributor: Raul San Jose (SPL, BWH), Demian Wassermann (SPL, BWH) acknowledgements: Laboratory of Mathematics in Imaging. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = TractographyLabelMapSeedingInputSpec output_spec = TractographyLabelMapSeedingOutputSpec _cmd = "TractographyLabelMapSeeding " _outputs_filenames = {'OutputFibers':'OutputFibers.vtk','outputdirectory':'outputdirectory'} class DWIJointRicianLMMSEFilterInputSpec(CommandLineInputSpec): re = InputMultiPath(traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") rf = InputMultiPath(traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") ng = traits.Int(desc="The number of the closest gradients that are used to jointly filter a given gradient direction (0 to use all).", argstr="--ng %d") inputVolume = File(position=-2, desc="Input DWI volume.", exists=True, argstr="%s") outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DWI volume.", argstr="%s") compressOutput = traits.Bool(desc="Compress the data of the compressed file using gzip", argstr="--compressOutput ") class DWIJointRicianLMMSEFilterOutputSpec(TraitedSpec): outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) class DWIJointRicianLMMSEFilter(SEMLikeCommandLine): """title: DWI Joint Rician LMMSE Filter category: Diffusion.Diffusion Weighted Images description: This module reduces Rician noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. The N closest gradient directions to the direction being processed are filtered together to improve the results: the noise-free signal is seen as an n-diemensional vector which has to be estimated with the LMMSE method from a set of corrupted measurements. To that end, the covariance matrix of the noise-free vector and the cross covariance between this signal and the noise have to be estimated, which is done taking into account the image formation process. The noise parameter is automatically estimated from a rough segmentation of the background of the image. In this area the signal is simply 0, so that Rician statistics reduce to Rayleigh and the noise power can be easily estimated from the mode of the histogram. A complete description of the algorithm may be found in: Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. version: 0.1.1.$Revision: 1 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/JointRicianLMMSEImageFilter contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) acknowledgements: Partially founded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). """ input_spec = DWIJointRicianLMMSEFilterInputSpec output_spec = DWIJointRicianLMMSEFilterOutputSpec _cmd = "DWIJointRicianLMMSEFilter " _outputs_filenames = {'outputVolume':'outputVolume.nii'} class DiffusionWeightedVolumeMaskingInputSpec(CommandLineInputSpec): inputVolume = File(position=-4, desc="Input DWI volume", exists=True, argstr="%s") outputBaseline = traits.Either(traits.Bool, File(), position=-2, hash_files=False, desc="Estimated baseline volume", argstr="%s") thresholdMask = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Otsu Threshold Mask", argstr="%s") otsuomegathreshold = traits.Float(desc="Control the sharpness of the threshold in the Otsu computation. 
0: lower threshold, 1: higher threhold", argstr="--otsuomegathreshold %f") removeislands = traits.Bool(desc="Remove Islands in Threshold Mask?", argstr="--removeislands ") class DiffusionWeightedVolumeMaskingOutputSpec(TraitedSpec): outputBaseline = File(position=-2, desc="Estimated baseline volume", exists=True) thresholdMask = File(position=-1, desc="Otsu Threshold Mask", exists=True) class DiffusionWeightedVolumeMasking(SEMLikeCommandLine): """title: Diffusion Weighted Volume Masking category: Diffusion.Diffusion Weighted Images description: <p>Performs a mask calculation from a diffusion weighted (DW) image.</p><p>Starting from a dw image, this module computes the baseline image averaging all the images without diffusion weighting and then applies the otsu segmentation algorithm in order to produce a mask. this mask can then be used when estimating the diffusion tensor (dt) image, not to estimate tensors all over the volume.</p> version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionWeightedMasking license: slicer3 contributor: Demian Wassermann (SPL, BWH) """ input_spec = DiffusionWeightedVolumeMaskingInputSpec output_spec = DiffusionWeightedVolumeMaskingOutputSpec _cmd = "DiffusionWeightedVolumeMasking " _outputs_filenames = {'outputBaseline':'outputBaseline.nii','thresholdMask':'thresholdMask.nii'} class DTIimportInputSpec(CommandLineInputSpec): inputFile = File(position=-2, desc="Input DTI file", exists=True, argstr="%s") outputTensor = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI volume", argstr="%s") testingmode = traits.Bool(desc="Enable testing mode. Sample helix file (helix-DTI.nhdr) will be loaded into Slicer and converted in Nifti.", argstr="--testingmode ") class DTIimportOutputSpec(TraitedSpec): outputTensor = File(position=-1, desc="Output DTI volume", exists=True) class DTIimport(SEMLikeCommandLine): """title: DTIimport category: Diffusion.Diffusion Data Conversion description: Import tensor datasets from various formats, including the NifTi file format version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIImport contributor: Sonia Pujol (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = DTIimportInputSpec output_spec = DTIimportOutputSpec _cmd = "DTIimport " _outputs_filenames = {'outputTensor':'outputTensor.nii'} class DWIToDTIEstimationInputSpec(CommandLineInputSpec): inputVolume = File(position=-3, desc="Input DWI volume", exists=True, argstr="%s") mask = File(desc="Mask where the tensors will be computed", exists=True, argstr="--mask %s") outputTensor = traits.Either(traits.Bool, File(), position=-2, hash_files=False, desc="Estimated DTI volume", argstr="%s") outputBaseline = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Estimated baseline volume", argstr="%s") enumeration = traits.Enum("LS", "WLS", desc="LS: Least Squares, WLS: Weighted Least Squares", argstr="--enumeration %s") shiftNeg = traits.Bool(desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error)", argstr="--shiftNeg ") class DWIToDTIEstimationOutputSpec(TraitedSpec): outputTensor = File(position=-2, desc="Estimated DTI volume", exists=True) outputBaseline = File(position=-1, desc="Estimated baseline volume", exists=True) class DWIToDTIEstimation(SEMLikeCommandLine): """title: DWI to DTI Estimation category: Diffusion.Diffusion Weighted Images description: Performs a tensor model estimation from diffusion weighted images. There are three estimation methods available: least squares, weigthed least squares and non-linear estimation. The first method is the traditional method for tensor estimation and the fastest one. Weighted least squares takes into account the noise characteristics of the MRI images to weight the DWI samples used in the estimation based on its intensity magnitude. The last method is the more complex. version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorEstimation license: slicer3 contributor: Raul San Jose (SPL, BWH) acknowledgements: This command module is based on the estimation functionality provided by the Teem library. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = DWIToDTIEstimationInputSpec output_spec = DWIToDTIEstimationOutputSpec _cmd = "DWIToDTIEstimation " _outputs_filenames = {'outputTensor':'outputTensor.nii','outputBaseline':'outputBaseline.nii'} class DiffusionTensorScalarMeasurementsInputSpec(CommandLineInputSpec): inputVolume = File(position=-3, desc="Input DTI volume", exists=True, argstr="%s") outputScalar = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Scalar volume derived from tensor", argstr="%s") enumeration = traits.Enum("Trace", "Determinant", "RelativeAnisotropy", "FractionalAnisotropy", "Mode", "LinearMeasure", "PlanarMeasure", "SphericalMeasure", "MinEigenvalue", "MidEigenvalue", "MaxEigenvalue", "MaxEigenvalueProjectionX", "MaxEigenvalueProjectionY", "MaxEigenvalueProjectionZ", "RAIMaxEigenvecX", "RAIMaxEigenvecY", "RAIMaxEigenvecZ", "MaxEigenvecX", "MaxEigenvecY", "MaxEigenvecZ", "D11", "D22", "D33", "ParallelDiffusivity", "PerpendicularDffusivity", desc="An enumeration of strings", argstr="--enumeration %s") class DiffusionTensorScalarMeasurementsOutputSpec(TraitedSpec): outputScalar = File(position=-1, desc="Scalar volume derived from tensor", exists=True) class DiffusionTensorScalarMeasurements(SEMLikeCommandLine): """title: Diffusion Tensor Scalar Measurements category: Diffusion.Diffusion Tensor Images description: Compute a set of different scalar measurements from a tensor field, specially oriented for Diffusion Tensors where some rotationally invariant measurements, like Fractional Anisotropy, are highly used to describe the anistropic behaviour of the tensor. version: 0.1.0.$Revision: 1892 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorMathematics contributor: Raul San Jose (SPL, BWH) acknowledgements: LMI """ input_spec = DiffusionTensorScalarMeasurementsInputSpec output_spec = DiffusionTensorScalarMeasurementsOutputSpec _cmd = "DiffusionTensorScalarMeasurements " _outputs_filenames = {'outputScalar':'outputScalar.nii'} class DTIexportInputSpec(CommandLineInputSpec): inputTensor = File(position=-2, desc="Input DTI volume", exists=True, argstr="%s") outputFile = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output DTI file", argstr="%s") class DTIexportOutputSpec(TraitedSpec): outputFile = File(position=-1, desc="Output DTI file", exists=True) class DTIexport(SEMLikeCommandLine): """title: DTIexport category: Diffusion.Diffusion Data Conversion description: Export DTI data to various file formats version: 1.0 documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIExport contributor: Sonia Pujol (SPL, BWH) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = DTIexportInputSpec output_spec = DTIexportOutputSpec _cmd = "DTIexport " _outputs_filenames = {'outputFile':'outputFile'}
bsd-3-clause
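The interfaces above are SEM-style wrappers around Slicer command-line modules: each input trait contributes its argstr to the command and _cmd names the executable. A hedged sketch of how DWIToDTIEstimation would typically be driven from nipype; the file names are placeholders touched only so the exists=True traits validate, and actually running the node requires the Slicer CLI tools on PATH:

from pathlib import Path
from nipype.interfaces.slicer.diffusion.diffusion import DWIToDTIEstimation

# nipype validates exists=True inputs at assignment time, so create stand-ins
Path('dwi.nhdr').touch()
Path('brain_mask.nrrd').touch()

dti = DWIToDTIEstimation()
dti.inputs.inputVolume = 'dwi.nhdr'        # placeholder DWI volume
dti.inputs.mask = 'brain_mask.nrrd'        # placeholder mask (optional)
dti.inputs.enumeration = 'WLS'             # weighted least squares tensor fit
dti.inputs.outputTensor = True             # auto-named outputTensor.nii
dti.inputs.outputBaseline = True           # auto-named outputBaseline.nii

print(dti.cmdline)   # inspect the Slicer command that would be executed
# dti.run()          # needs the DWIToDTIEstimation executable from 3D Slicer

Inspecting cmdline before run() is a common way to sanity-check how the traits above translate into Slicer arguments.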
KellyChan/Python
python/sklearn/examples/general/pipeline_anova_svm.py
3
1360
#-----------------------------------------------------------#
# Project: Pipeline Anova SVM
# Author: Kelly Chan
# Date: Apr 22 2014
#-----------------------------------------------------------#

print(__doc__)

from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline


def loadData():
    # generating data
    X, y = samples_generator.make_classification(n_features=20,
                                                 n_informative=3,
                                                 n_redundant=0,
                                                 n_classes=4,
                                                 n_clusters_per_class=2)
    return X, y


# ANOVA SVM-C
def createANOVASVM():
    # anova filter, take 3 best ranked features
    anova_filter = SelectKBest(f_regression, k=3)
    # svm
    clf = svm.SVC(kernel='linear')
    anova_svm = Pipeline([('anova', anova_filter),
                          ('svm', clf)])
    return anova_svm


def predict(X, y, anova_svm):
    anova_svm.fit(X, y)
    target = anova_svm.predict(X)
    return target


def test():
    X, y = loadData()
    anova_svm = createANOVASVM()
    target = predict(X, y, anova_svm)
    print(target)


if __name__ == '__main__':
    test()
mit
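Because the pipeline above chains SelectKBest with an SVC, a natural follow-up is to check which of the 20 generated features the ANOVA filter actually kept. A short self-contained sketch using the same public API as the example above:

from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline

X, y = samples_generator.make_classification(n_features=20, n_informative=3,
                                              n_redundant=0, n_classes=4,
                                              n_clusters_per_class=2)
anova_svm = Pipeline([('anova', SelectKBest(f_regression, k=3)),
                      ('svm', svm.SVC(kernel='linear'))])
anova_svm.fit(X, y)

# boolean mask over the 20 input features; exactly k=3 entries are True
support = anova_svm.named_steps['anova'].get_support()
print(support.nonzero()[0])  # indices of the features the ANOVA filter kept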
arabenjamin/scikit-learn
sklearn/cluster/tests/test_mean_shift.py
120
3429
""" Testing for mean shift clustering methods """ import numpy as np import warnings from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raise_message from sklearn.cluster import MeanShift from sklearn.cluster import mean_shift from sklearn.cluster import estimate_bandwidth from sklearn.cluster import get_bin_seeds from sklearn.datasets.samples_generator import make_blobs n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=300, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=11) def test_estimate_bandwidth(): # Test estimate_bandwidth bandwidth = estimate_bandwidth(X, n_samples=200) assert_true(0.9 <= bandwidth <= 1.5) def test_mean_shift(): # Test MeanShift algorithm bandwidth = 1.2 ms = MeanShift(bandwidth=bandwidth) labels = ms.fit(X).labels_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) assert_equal(n_clusters_, n_clusters) cluster_centers, labels = mean_shift(X, bandwidth=bandwidth) labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) assert_equal(n_clusters_, n_clusters) def test_meanshift_predict(): # Test MeanShift.predict ms = MeanShift(bandwidth=1.2) labels = ms.fit_predict(X) labels2 = ms.predict(X) assert_array_equal(labels, labels2) def test_meanshift_all_orphans(): # init away from the data, crash with a sensible warning ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]]) msg = "No point was within bandwidth=0.1" assert_raise_message(ValueError, msg, ms.fit, X,) def test_unfitted(): # Non-regression: before fit, there should be not fitted attributes. ms = MeanShift() assert_false(hasattr(ms, "cluster_centers_")) assert_false(hasattr(ms, "labels_")) def test_bin_seeds(): # Test the bin seeding technique which can be used in the mean shift # algorithm # Data is just 6 points in the plane X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2], [2., 1.], [2.1, 1.1], [0., 0.]]) # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be # found ground_truth = set([(1., 1.), (2., 1.), (0., 0.)]) test_bins = get_bin_seeds(X, 1, 1) test_result = set([tuple(p) for p in test_bins]) assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be # found ground_truth = set([(1., 1.), (2., 1.)]) test_bins = get_bin_seeds(X, 1, 2) test_result = set([tuple(p) for p in test_bins]) assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found # we bail and use the whole data here. with warnings.catch_warnings(record=True): test_bins = get_bin_seeds(X, 0.01, 1) assert_array_equal(test_bins, X) # tight clusters around [0, 0] and [1, 1], only get two bins X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]], cluster_std=0.1, random_state=0) test_bins = get_bin_seeds(X, 1) assert_array_equal(test_bins, [[0, 0], [1, 1]])
bsd-3-clause
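The mean shift tests above build blob data, estimate a bandwidth from it, and check that MeanShift recovers the three clusters. A minimal end-to-end sketch of that workflow, using the same (older) sklearn.datasets.samples_generator import path as the file above:

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs  # path used above

centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, centers=centers, cluster_std=0.4,
                  random_state=11)

# pick the bandwidth from the data, as test_estimate_bandwidth does above
bandwidth = estimate_bandwidth(X, n_samples=200)

ms = MeanShift(bandwidth=bandwidth).fit(X)
print(len(np.unique(ms.labels_)))   # expected: 3 clusters
print(ms.cluster_centers_)          # one center per recovered cluster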
tensorflow/tensorflow-experimental_link_static_libraries_once
tensorflow/python/distribute/strategy_test_lib.py
11
32895
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library for testing DistributionStrategy descendants.""" import functools import os import tempfile import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.util import event_pb2 from tensorflow.python.client import session as session_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import collective_all_reduce_strategy as mwms_lib from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_utils from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import mirrored_strategy as mirrored_lib from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.lib.io import tf_record from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import init_ops_v2 from tensorflow.python.ops import math_ops from tensorflow.python.ops import summary_ops_v2 as summary_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.training import optimizer from tensorflow.python.training import training_util from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect class _TestException(Exception): pass # Conditionally wrap the fn in a def_function.function (so it runs in graph # mode). def _maybe_run_in_function(fn, run_in_function=False): if not run_in_function or not context.executing_eagerly(): return fn else: return def_function.function()(fn) # May be the argument to either distribution.extended.call_for_each_replica() or # get_replica_context().merge_call() def _raise_exception_fn(_=None): raise _TestException() # Must be the argument to a distribution.extended.call_for_each_replica() call, # calls a get_replica_context().merge_call() that raises an exception. def _merge_raises_fn(): ds_context.get_replica_context().merge_call(_raise_exception_fn) # Must be the argument to a get_replica_context().merge_call() call, calls # dist.extended.call_for_each_replica() with a function that raises an # exception. 
def _call_raises_fn(dist): dist.extended.call_for_each_replica(_raise_exception_fn) # Must be the argument to a distribution.extended.call_for_each_replica() call, # calls a get_replica_context().merge_call() that calls a # call_for_each_replica() that raises an exception. def _merge_call_raises_fn(): ds_context.get_replica_context().merge_call(_call_raises_fn) # Must be the argument to a get_replica_context().merge_call() call, calls # dist.extended.call_for_each_replica() with a function that calls a # get_replica_context().merge_call() that raises an exception. def _call_merge_raises_fn(dist): dist.extended.call_for_each_replica(_merge_raises_fn) # Must be the argument to a distribution.extended.call_for_each_replica() call, # calls a get_replica_context().merge_call() that calls a # call_for_each_replica() that calls a get_replica_context().merge_call() that # raises an exception. def _merge_call_merge_raises_fn(): ds_context.get_replica_context().merge_call(_call_merge_raises_fn) def _events_from_logdir(test_case, logdir): """Reads summary events from log directory.""" test_case.assertTrue(gfile.Exists(logdir)) files = gfile.ListDirectory(logdir) test_case.assertLen(files, 1) records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0]))) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result def create_variable_like_keras_layer(name, shape, dtype): """Utitlity for create variables that works like variable in keras layer.""" initializer = functools.partial( init_ops_v2.GlorotUniform(), shape, dtype=dtype) return variables.Variable( initial_value=initializer, name=name, trainable=True) def is_optimizer_v2_instance(optimizer_obj): # For a optimizer instance, the v2 implementation has var_list as a required # argument. arg_spec = tf_inspect.getfullargspec(optimizer_obj.minimize) return "var_list" in arg_spec.args[:-len(arg_spec.defaults)] def is_mirrored_strategy(strategy: distribute_lib.Strategy) -> bool: return isinstance( strategy, (mirrored_lib.MirroredStrategy, mirrored_lib.MirroredStrategyV1)) def is_multi_worker_mirrored_strategy( strategy: distribute_lib.Strategy) -> bool: return isinstance(strategy, (mwms_lib.CollectiveAllReduceStrategy, mwms_lib.CollectiveAllReduceStrategyV1)) def is_tpu_strategy(strategy: distribute_lib.Strategy) -> bool: return isinstance(strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1, tpu_strategy.TPUStrategyV2)) class DistributionTestBase(test.TestCase): """Some tests that should work with any DistributionStrategy.""" def _test_minimize_loss_eager(self, d): with d.scope(): kernel = create_variable_like_keras_layer( name="kernel", shape=(1, 1), dtype=dtypes.float32) def loss(x): y = array_ops.reshape( math_ops.mat_mul(x, kernel), []) - array_ops.identity(1.) return y * y # TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a # common `implicit_grad` function and put it in DistributionStrategy. grad_fn = backprop.implicit_grad(loss) grad_fn = optimizer.get_filtered_grad_fn(grad_fn) def update(v, g): return v.assign_sub(0.2 * g) one = array_ops.identity([[1.]]) def step(): """Perform one optimization step.""" # Run forward & backward to get gradients, variables list. g_v = d.extended.call_for_each_replica(grad_fn, args=(one,)) # Update the variables using the gradients and the update() function. 
before_list = [] after_list = [] for g, v in g_v: fetched = d.extended.read_var(v) before_list.append(fetched) # control_dependencies irrelevant but harmless in eager execution with ops.control_dependencies([fetched]): g = d.extended.reduce_to( reduce_util.ReduceOp.SUM, g, destinations=v) with ops.control_dependencies( d.extended.update(v, update, args=(g,), group=False)): after_list.append(d.extended.read_var(v)) return before_list, after_list for i in range(10): b, a = step() if i == 0: before, = b # pylint: disable=unbalanced-tuple-unpacking after, = a # pylint: disable=unbalanced-tuple-unpacking error_before = abs(before.numpy() - 1) error_after = abs(after.numpy() - 1) # Error should go down self.assertLess(error_after, error_before) def _test_minimize_loss_graph(self, d, soft_placement=False, learning_rate=0.2): config = config_pb2.ConfigProto() config.allow_soft_placement = soft_placement config.gpu_options.per_process_gpu_memory_fraction = 0.3 with context.graph_mode(), \ ops.Graph().as_default(), \ self.cached_session(config=config) as sess, \ d.scope(): kernel = create_variable_like_keras_layer( name="kernel", shape=(1, 1), dtype=dtypes.float32) def loss(x): y = array_ops.reshape( math_ops.mat_mul(x, kernel), []) - array_ops.identity(1.) return y * y grad_fn = backprop.implicit_grad(loss) def update(v, g): return v.assign_sub(learning_rate * g) one = array_ops.identity([[1.]]) def step(): """Perform one optimization step.""" # Run forward & backward to get gradients, variables list. g_v = d.extended.call_for_each_replica(grad_fn, args=(one,)) # Update the variables using the gradients and the update() function. before_list = [] after_list = [] for g, v in g_v: fetched = d.extended.read_var(v) before_list.append(fetched) with ops.control_dependencies([fetched]): g = d.extended.reduce_to( reduce_util.ReduceOp.SUM, g, destinations=v) with ops.control_dependencies( d.extended.update(v, update, args=(g,), group=False)): after_list.append(d.extended.read_var(v)) return before_list, after_list before_out, after_out = step() variables.global_variables_initializer().run() for i in range(10): b, a = sess.run((before_out, after_out)) if i == 0: before, = b after, = a error_before = abs(before - 1) error_after = abs(after - 1) # Error should go down self.assertLess(error_after, error_before) def _test_summary_for_replica_zero_only(self, d): logdir = tempfile.mkdtemp() def run_fn(): """Function executed for each replica.""" with summary_writer.as_default(): replica_id = ds_context.get_replica_context().replica_id_in_sync_group return summary_ops.write("a", replica_id) with self.cached_session() as sess, d.scope(), \ summary_ops.always_record_summaries(): # We need global_step because summary writing op *always* has global_step # as input, even when we always record summary or never record summary. global_step = training_util.get_or_create_global_step() if not context.executing_eagerly(): # When executing eagerly, variables are initialized immediately after # creation, and its initializer will be None. global_step.initializer.run() summary_ops.set_step(0) summary_writer = summary_ops.create_file_writer(logdir) output = d.extended.call_for_each_replica(run_fn) unwrapped = d.unwrap(output) if not context.executing_eagerly(): sess.run(summary_writer.init()) sess.run(unwrapped) sess.run(summary_writer.close()) events = _events_from_logdir(self, logdir) # There will be 2 entries: 1 summary file header entry, and 1 entry # written by replica 0. 
self.assertLen(events, 2) self.assertEqual(events[1].summary.value[0].tag, "a") self.assertEqual(events[1].summary.value[0].simple_value, 0.0) def _test_replica_id(self, d): with d.scope(): expected_devices = [False] * len(d.extended.worker_devices) def mark_devices_fn(): replica_id = self.evaluate( ds_context.get_replica_context().replica_id_in_sync_group) self.assertLess(replica_id, len(d.extended.worker_devices)) self.assertFalse(expected_devices[replica_id]) expected_devices[replica_id] = True d.extended.call_for_each_replica(mark_devices_fn) self.assertAllEqual(expected_devices, [True] * len(d.extended.worker_devices)) def _test_call_and_merge_exceptions(self, dist): with dist.scope(): with self.assertRaises(_TestException): dist.extended.call_for_each_replica(_raise_exception_fn) with self.assertRaises(_TestException): dist.extended.call_for_each_replica(_merge_raises_fn) with self.assertRaises(_TestException): dist.extended.call_for_each_replica(_merge_call_raises_fn) with self.assertRaises(_TestException): dist.extended.call_for_each_replica(_merge_call_merge_raises_fn) def _input_fn_to_test_input_context(self, dataset_or_callable_fn, expected_num_replicas_in_sync, expected_num_input_pipelines, expected_input_pipeline_id): # Use a list of one element as counter so that it can be captured by the # `_input_fn`. This counter is incremented by 1 each time an input_fn is # called. We use this counter to check whether the `input_pipeline_id` # matches the counter in the in-graph replication. worker_id_counter = [0] def _input_fn(input_context): """Input fn for testing.""" self.assertIsNotNone(input_context) self.assertEqual(expected_num_replicas_in_sync, input_context.num_replicas_in_sync) self.assertEqual(expected_num_input_pipelines, input_context.num_input_pipelines) if expected_input_pipeline_id is not None: self.assertEqual(expected_input_pipeline_id, input_context.input_pipeline_id) else: self.assertEqual(worker_id_counter[0], input_context.input_pipeline_id) worker_id_counter[0] += 1 return dataset_or_callable_fn() return _input_fn def _test_input_fn_iterable( self, strategy, input_fn, expected_values, ignore_order=False): assert_same = self.assertCountEqual if ignore_order else self.assertEqual iterable = strategy.distribute_datasets_from_function(input_fn) if context.executing_eagerly(): iterator = iter(iterable) for expected_value in expected_values: computed_value = self.evaluate( list(strategy.experimental_local_results(next(iterator)))) assert_same(expected_value, computed_value) with self.assertRaises(StopIteration): self.evaluate(strategy.experimental_local_results(next(iterator))) # After re-initializing the iterator, should be able to iterate again. 
iterator = iter(iterable) for expected_value in expected_values: computed_value = self.evaluate( list(strategy.experimental_local_results(next(iterator)))) assert_same(expected_value, computed_value) else: iterator = dataset_ops.make_initializable_iterator(iterable) self._test_input_fn_iterator(iterator, strategy.extended.worker_devices, expected_values, test_reinitialize=True, ignore_order=ignore_order) def _test_input_fn_iterator(self, iterator, devices, expected_values, sess=None, test_reinitialize=True, ignore_order=False): evaluate = lambda x: sess.run(x) if sess else self.evaluate(x) evaluate(iterator.initializer) for expected_value in expected_values: next_element = iterator.get_next() computed_value = evaluate( [distribute_utils.select_replica(r, next_element) for r in range(len(devices))]) if ignore_order: self.assertCountEqual(expected_value, computed_value) else: self.assertEqual(expected_value, computed_value) with self.assertRaises(errors.OutOfRangeError): next_element = iterator.get_next() evaluate( [distribute_utils.select_replica(r, next_element) for r in range(len(devices))]) # After re-initializing the iterator, should be able to iterate again. if test_reinitialize: evaluate(iterator.initializer) for expected_value in expected_values: next_element = iterator.get_next() computed_value = evaluate([ distribute_utils.select_replica(r, next_element) for r in range(len(devices)) ]) if ignore_order: self.assertCountEqual(expected_value, computed_value) else: self.assertEqual(expected_value, computed_value) def _test_global_step_update(self, strategy): with strategy.scope(): global_step = variable_scope.get_variable( "global_step", shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA) self.evaluate(variables.global_variables_initializer()) def model_fn(): train_op = global_step.assign_add(1) value = global_step.read_value() return train_op, value train_ops, value = strategy.extended.call_for_each_replica(model_fn) self.evaluate(strategy.group(train_ops)) global_step_tensors = strategy.experimental_local_results(value) global_step_values = self.evaluate(global_step_tensors) self.assertEqual((1,) * len(global_step_tensors), global_step_values) def _test_numpy_dataset(self, strategy, session=None, run_in_function=False): if not isinstance(strategy, distribute_lib.StrategyV1): self.skipTest("n/a: V1 only") cached_session = session or self.cached_session() with strategy.scope(), cached_session as sess: x = np.asarray([[1, 2], [6, 12], [2, 4], [5, 10], [3, 6], [4, 8]]) y = np.asarray([5, 4, 3, 2, 1, 0]) batch_size = 6 if not strategy.extended._global_batch_size: # pylint: disable=protected-access batch_size = batch_size // strategy.num_replicas_in_sync ds = strategy.extended.experimental_make_numpy_dataset( (x, y), session=sess or self.cached_session()) ds = ds.repeat(2) # 2 epochs # We need to use the drop_remainder argument to get a known static # input shape which is required for TPUs. 
drop_remainder = strategy.extended.experimental_require_static_shapes ds = ds.batch(batch_size, drop_remainder=drop_remainder) i = strategy.make_dataset_iterator(ds) self.evaluate(i.initializer) def run_and_concatenate(strategy, i): x, y = strategy.experimental_run( _maybe_run_in_function(lambda z: z, run_in_function), i) x, y = self.evaluate((strategy.experimental_local_results(x), strategy.experimental_local_results(y))) return np.concatenate(x), np.concatenate(y) x_1, y_1 = run_and_concatenate(strategy, i) self.assertAllEqual(x, x_1) self.assertAllEqual(y, y_1) x_2, y_2 = run_and_concatenate(strategy, i) self.assertAllEqual(x, x_2) self.assertAllEqual(y, y_2) with self.assertRaises(errors.OutOfRangeError): run_and_concatenate(strategy, i) def _test_trainable_variable(self, strategy): for cls in [variables.VariableV1, variables.Variable]: with strategy.scope(): v1 = cls(1.0) self.assertEqual(True, v1.trainable) v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ) self.assertEqual(False, v2.trainable) v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ, trainable=True) self.assertEqual(True, v3.trainable) v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ, trainable=False) self.assertEqual(False, v4.trainable) class OneDeviceDistributionTestBase(test.TestCase): """Some tests that should work with any one-device DistributionStrategy.""" def _test_run(self, strategy): out1 = strategy.run(lambda: array_ops.identity(4.)) self.assertAllEqual([4.], self.evaluate(strategy.unwrap(out1))) out2 = strategy.run(lambda x: {"a": x * 2, "b": x * x}, args=(out1,)) out2_vals = self.evaluate(nest.map_structure(strategy.unwrap, out2)) self.assertAllEqual([8.], out2_vals["a"]) self.assertAllEqual([16.], out2_vals["b"]) out3 = strategy.run(lambda b, a: a + 2 * b + 2, kwargs=out2) self.assertAllEqual([42.], self.evaluate(strategy.unwrap(out3))) def _test_all_reduce_sum(self, strategy): self._test_collective_comms( strategy, _all_sum, inputs=(4., [42., 43.]), expected=(4., [42., 43.])) def _test_all_reduce_sum_gradients(self, strategy): self._test_collective_comms_gradients( strategy, _all_sum, inputs=[4.], expected_grads=[4.]) def _test_all_reduce_sum_gradient_tape(self, strategy): self._test_collective_comms_gradient_tape( strategy, _all_sum, inputs=[4.], expected_grads=[4.]) def _test_all_reduce_mean(self, strategy): self._test_collective_comms( strategy, _all_mean, inputs=(2., [21., 22.]), expected=(2., [21., 22.])) def _test_all_reduce_mean_gradients(self, strategy): self._test_collective_comms_gradients( strategy, _all_mean, inputs=[5.], expected_grads=[5.]) def _test_all_reduce_mean_gradient_tape(self, strategy): self._test_collective_comms_gradient_tape( strategy, _all_mean, inputs=[5.], expected_grads=[5.]) def _test_collective_comms(self, strategy, comm_fn, inputs, expected): inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) outputs = self.evaluate( list( map(strategy.experimental_local_results, strategy.experimental_run(comm_fn, inputs)))) self.assertAllEqual([expected[0]], outputs[0]) self.assertAllEqual([expected[1]], outputs[1]) def _test_collective_comms_gradients(self, strategy, comm_fn, inputs, expected_grads): if context.executing_eagerly(): self.skipTest("`tf.gradients` is not supported with eager execution.") def step(c): x = array_ops.identity(42.) 
y = comm_fn(x) * c return gradients_impl.gradients(y, [x])[0] inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run(step, inputs)))) def _test_collective_comms_gradient_tape(self, strategy, comm_fn, inputs, expected_grads): def step(c): x = array_ops.identity(42.) with backprop.GradientTape() as tape: tape.watch(x) y = comm_fn(x) * c return tape.gradient(y, x) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run(step, inputs)))) def _test_device_and_input_device_are_colocated(self, strategy): if context.executing_eagerly(): self.skipTest( "cross-device tests are not supported with eager execution.") workers, _ = test_util.create_local_cluster(2, 0) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.range(5)) comm_fn = lambda x: x + 1 run_op = strategy.experimental_run(comm_fn, inputs) with session_lib.Session(target=workers[1].target) as sess: sess.run(inputs.initialize()) sess.run(run_op) def _test_device_and_input_device_are_colocated_with_function(self, strategy): if context.executing_eagerly(): self.skipTest( "cross-device tests are not supported with eager execution.") workers, _ = test_util.create_local_cluster(2, 0) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.range(5)) comm_fn = lambda x: x + 1 experimental_run = def_function.function()(strategy.experimental_run) with ops.device("/job:worker/replica:0/task:1/device:CPU:0"): # The tf.function must be defined on the right device as well. 
run_op = experimental_run(comm_fn, inputs) with session_lib.Session(target=workers[1].target) as sess: sess.run(inputs.initialize()) sess.run(run_op) class TwoDeviceDistributionTestBase(test.TestCase): """Some tests that should work with any two-device DistributionStrategy.""" def _test_run(self, strategy, run_in_function=False): out1 = strategy.run(_maybe_run_in_function( lambda: ds_context.get_replica_context().replica_id_in_sync_group + 1, run_in_function)) self.assertAllEqual([1, 2], self.evaluate(strategy.unwrap(out1))) out2 = strategy.run(_maybe_run_in_function( lambda x: {"a": x * 2, "b": x * x}, run_in_function), args=(out1,)) out2_vals = self.evaluate(nest.map_structure(strategy.unwrap, out2)) self.assertAllEqual([2, 4], out2_vals["a"]) self.assertAllEqual([1, 4], out2_vals["b"]) out3 = strategy.run(_maybe_run_in_function( lambda b, a: a + 2 * b + 2, run_in_function), kwargs=out2) self.assertAllEqual([6, 14], self.evaluate(strategy.unwrap(out3))) def _test_all_reduce_sum(self, strategy, run_in_function=False): self._test_collective_comms( strategy, _all_sum, inputs=([1., 3.], [[39., 2.], [3., 41.]]), expected=(4., [42., 43.]), run_in_function=run_in_function) def _test_all_reduce_sum_gradients(self, strategy, run_in_function=False): self._test_collective_comms_gradients( strategy, _all_sum, inputs=[1., 3.], expected_grads=[4., 4.], run_in_function=run_in_function) def _test_all_reduce_sum_gradient_tape(self, strategy, run_in_function=False): self._test_collective_comms_gradient_tape( strategy, _all_sum, inputs=[1., 3.], expected_grads=[4., 4.], run_in_function=run_in_function) def _test_all_reduce_mean(self, strategy, run_in_function=False): self._test_collective_comms( strategy, _all_mean, inputs=([1., 3.], [[39., 2.], [3., 41.]]), expected=(2., [21., 21.5]), run_in_function=run_in_function) def _test_all_reduce_mean_gradients(self, strategy, run_in_function=False): self._test_collective_comms_gradients( strategy, _all_mean, inputs=[1., 3.], expected_grads=[2., 2.], run_in_function=run_in_function) def _test_all_reduce_mean_gradient_tape(self, strategy, run_in_function=False): self._test_collective_comms_gradient_tape( strategy, _all_mean, inputs=[1., 3.], expected_grads=[2., 2.], run_in_function=run_in_function) def _test_collective_comms(self, strategy, comm_fn, inputs, expected, run_in_function=False): inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensor_slices(inputs)) self.evaluate(inputs.initialize()) outputs = self.evaluate( list( map(strategy.experimental_local_results, strategy.experimental_run( _maybe_run_in_function(comm_fn, run_in_function), inputs)))) self.assertAllEqual([expected[0], expected[0]], outputs[0]) self.assertAllEqual([expected[1], expected[1]], outputs[1]) def _test_collective_comms_gradients(self, strategy, comm_fn, inputs, expected_grads, run_in_function=False): if context.executing_eagerly() and not run_in_function: self.skipTest("`tf.gradients` is not supported with eager execution " "without using tf.functions.") def step(c): x = array_ops.identity(42.) 
y = comm_fn(x) * c return gradients_impl.gradients(y, [x])[0] inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensor_slices(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run( _maybe_run_in_function(step, run_in_function), inputs)))) def _test_collective_comms_gradient_tape(self, strategy, comm_fn, inputs, expected_grads, run_in_function=False): def step(c): x = array_ops.identity(42.) with backprop.GradientTape() as tape: tape.watch(x) y = comm_fn(x) * c return tape.gradient(y, x) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensor_slices(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run( _maybe_run_in_function(step, run_in_function), inputs)))) class RemoteSingleWorkerMirroredStrategyBase(DistributionTestBase): """Tests for a Remote single worker.""" def _get_num_gpus(self): pass def _testNumReplicasInSync(self, distribution): self.assertEqual(self._get_num_gpus(), distribution.num_replicas_in_sync) def _testMinimizeLoss(self, distribution): if context.executing_eagerly(): self._test_minimize_loss_eager(distribution) else: self._test_minimize_loss_graph(distribution, learning_rate=0.05) def _testDeviceScope(self, distribution): with distribution.scope(): a = array_ops.identity(1.) with ops.device("/cpu:0"): b = array_ops.identity(1.) if context.executing_eagerly(): device = "/job:worker/replica:0/task:0/device:CPU:0" else: device = "/job:worker/replica:0/task:0" self.assertEqual(a.device, device) self.assertEqual(b.device, "/job:worker/replica:0/task:0/device:CPU:0") def _testMakeInputFnIteratorWithDataset(self, distribution): dataset_fn = lambda: dataset_ops.Dataset.range(100) num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return num_workers = 1 expected_values = [[i+j for j in range(num_gpus)] * num_workers for i in range(0, 100, num_gpus)] # Dummy cached_session is used in Eager with self.cached_session() as sess: # `expected_input_pipeline_id` is None because the input_fn will be called # multiple times, each with a different input_pipeline_id. input_fn = self._input_fn_to_test_input_context( dataset_fn, expected_num_replicas_in_sync=num_workers*num_gpus, expected_num_input_pipelines=num_workers, expected_input_pipeline_id=None) iterator = distribution.make_input_fn_iterator(input_fn) self._test_input_fn_iterator( iterator, distribution.extended.worker_devices, expected_values, sess) def _testMakeInputFnIteratorWithCallable(self, distribution): def fn(): dataset = dataset_ops.Dataset.range(100) it = dataset_ops.make_one_shot_iterator(dataset) return it.get_next num_gpus = self._get_num_gpus() # pylint: disable=assignment-from-no-return num_workers = 1 expected_values = [] for i in range(0, 100, num_gpus): expected_values.append([i+j for j in range(num_gpus)] * num_workers) # Dummy cached_session is used in Eager with self.cached_session() as sess: # `expected_input_pipeline_id` is None because the input_fn will be called # multiple times, each with a different input_pipeline_id. 
input_fn = self._input_fn_to_test_input_context( fn, expected_num_replicas_in_sync=num_workers*num_gpus, expected_num_input_pipelines=num_workers, expected_input_pipeline_id=None) iterator = distribution.make_input_fn_iterator(input_fn) self._test_input_fn_iterator( iterator, distribution.extended.worker_devices, expected_values, sess, test_reinitialize=False, ignore_order=True) def _all_sum(value): ctx = ds_context.get_replica_context() return ctx.all_reduce(reduce_util.ReduceOp.SUM, value) def _all_mean(value): ctx = ds_context.get_replica_context() return ctx.all_reduce(reduce_util.ReduceOp.MEAN, value)
apache-2.0
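A minimal sketch, separate from the record above, of the replica-context all-reduce pattern that the _all_sum/_all_mean helpers and the _test_run methods exercise, written against the public tf.distribute API rather than the internal test utilities. The single-machine MirroredStrategy setup is an assumption about the available devices.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # uses all visible local devices

def replica_fn():
    ctx = tf.distribute.get_replica_context()
    # Each replica contributes replica_id + 1; the values are then summed
    # across replicas, mirroring what the _all_sum helper does above.
    value = tf.cast(ctx.replica_id_in_sync_group, tf.float32) + 1.0
    return ctx.all_reduce(tf.distribute.ReduceOp.SUM, value)

per_replica = strategy.run(replica_fn)
# Unwrap to one result per local replica; every replica holds the same sum.
print(strategy.experimental_local_results(per_replica))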
catap/namebench
nb_third_party/dns/rdtypes/ANY/CNAME.py
248
1092
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.rdtypes.nsbase


class CNAME(dns.rdtypes.nsbase.NSBase):
    """CNAME record

    Note: although CNAME is officially a singleton type, dnspython allows
    non-singleton CNAME rdatasets because such sets have been commonly
    used by BIND and other nameservers for load balancing."""
    pass
apache-2.0
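A minimal sketch, separate from the record above, of how a CNAME rdata is normally reached through dnspython's generic from_text interface, which dispatches to the NSBase-derived class in that file. The target name is a made-up example.

import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Parse a CNAME rdata from zone-file text; dispatch ends up in the
# dns.rdtypes.ANY.CNAME.CNAME class shown above.
rdata = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.CNAME,
                            'target.example.com.')
print(rdata.target)     # dns.name.Name for the canonical target
print(rdata.to_text())  # 'target.example.com.'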
DonBeo/scikit-learn
sklearn/cross_validation.py
5
57208
""" The :mod:`sklearn.cross_validation` module includes utilities for cross- validation and performance evaluation. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings from itertools import chain, combinations from math import ceil, floor, factorial import numbers import time from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import is_classifier, clone from .utils import indexable, check_random_state, safe_indexing from .utils.validation import (_is_arraylike, _num_samples, check_array, column_or_1d) from .utils.multiclass import type_of_target from .externals.joblib import Parallel, delayed, logger from .externals.six import with_metaclass from .externals.six.moves import zip from .metrics.scorer import check_scoring from .utils.fixes import bincount __all__ = ['KFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'check_cv', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'train_test_split'] class _PartitionIterator(with_metaclass(ABCMeta)): """Base class for CV iterators where train_mask = ~test_mask Implementations must define `_iter_test_masks` or `_iter_test_indices`. Parameters ---------- n : int Total number of elements in dataset. """ def __init__(self, n): if abs(n - int(n)) >= np.finfo('f').eps: raise ValueError("n must be an integer") self.n = int(n) def __iter__(self): ind = np.arange(self.n) for test_index in self._iter_test_masks(): train_index = np.logical_not(test_index) train_index = ind[train_index] test_index = ind[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices() """ for test_index in self._iter_test_indices(): test_mask = self._empty_mask() test_mask[test_index] = True yield test_mask def _iter_test_indices(self): """Generates integer indices corresponding to test sets.""" raise NotImplementedError def _empty_mask(self): return np.zeros(self.n, dtype=np.bool) class LeaveOneOut(_PartitionIterator): """Leave-One-Out cross validation iterator. Provides train/test indices to split data in train test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``. Due to the high number of test sets (which is the same as the number of samples) this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Parameters ---------- n : int Total number of elements in dataset. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = cross_validation.LeaveOneOut(2) >>> len(loo) 2 >>> print(loo) sklearn.cross_validation.LeaveOneOut(n=2) >>> for train_index, test_index in loo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def _iter_test_indices(self): return range(self.n) def __repr__(self): return '%s.%s(n=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, ) def __len__(self): return self.n class LeavePOut(_PartitionIterator): """Leave-P-Out cross validation iterator Provides train/test indices to split data in train test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Parameters ---------- n : int Total number of elements in dataset. p : int Size of the test sets. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = cross_validation.LeavePOut(4, 2) >>> len(lpo) 6 >>> print(lpo) sklearn.cross_validation.LeavePOut(n=4, p=2) >>> for train_index, test_index in lpo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, n, p): super(LeavePOut, self).__init__(n) self.p = p def _iter_test_indices(self): for comb in combinations(range(self.n), self.p): yield np.array(comb) def __repr__(self): return '%s.%s(n=%i, p=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.p, ) def __len__(self): return int(factorial(self.n) / factorial(self.n - self.p) / factorial(self.p)) class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)): """Base class to validate KFold approaches""" @abstractmethod def __init__(self, n, n_folds, shuffle, random_state): super(_BaseKFold, self).__init__(n) if abs(n_folds - int(n_folds)) >= np.finfo('f').eps: raise ValueError("n_folds must be an integer") self.n_folds = n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross validation requires at least one" " train / test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if n_folds > self.n: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(n_folds, n)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.shuffle = shuffle self.random_state = random_state class KFold(_BaseKFold): """K-Folds cross validation iterator. Provides train/test indices to split data in train test sets. Split dataset into k consecutive folds (without shuffling). Each fold is then used a validation set once while the k - 1 remaining fold form the training set. Parameters ---------- n : int Total number of elements. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle the data before splitting into batches. 
random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = cross_validation.KFold(4, n_folds=2) >>> len(kf) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in kf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first n % n_folds folds have size n // n_folds + 1, other folds have size n // n_folds. See also -------- StratifiedKFold: take label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). """ def __init__(self, n, n_folds=3, shuffle=False, random_state=None): super(KFold, self).__init__(n, n_folds, shuffle, random_state) self.idxs = np.arange(n) if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): n = self.n n_folds = self.n_folds fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield self.idxs[start:stop] current = stop def __repr__(self): return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Parameters ---------- y : array-like, [n_samples] Samples to split in K folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : None, int or RandomState Pseudo-random number generator state used for random sampling. If None, use default numpy RNG for shuffling Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = cross_validation.StratifiedKFold(y, n_folds=2) >>> len(skf) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in skf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size trunc(n_samples / n_folds), the last one has the complementary. 
""" def __init__(self, y, n_folds=3, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__( len(y), n_folds, shuffle, random_state) y = np.asarray(y) n_samples = y.shape[0] unique_labels, y_inversed = np.unique(y, return_inverse=True) label_counts = bincount(y_inversed) min_labels = np.min(label_counts) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # don't want to use the same seed in each label's shuffle if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each label so as to respect the # balance of labels per_label_cvs = [ KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle, random_state=rng) for c in label_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)): for label, (_, test_split) in zip(unique_labels, per_label_splits): label_test_folds = test_folds[y == label] # the test split can be too big because we used # KFold(max(c, self.n_folds), self.n_folds) instead of # KFold(c, self.n_folds) to make it possible to not crash even # if the data is not 100% stratifiable for all the labels # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(label_test_folds)] label_test_folds[test_split] = test_fold_idx test_folds[y == label] = label_test_folds self.test_folds = test_folds self.y = y def _iter_test_masks(self): for i in range(self.n_folds): yield self.test_folds == i def __repr__(self): return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.y, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LeaveOneLabelOut(_PartitionIterator): """Leave-One-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = cross_validation.LeaveOneLabelOut(labels) >>> len(lol) 2 >>> print(lol) sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2]) >>> for train_index, test_index in lol: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] """ def __init__(self, labels): super(LeaveOneLabelOut, self).__init__(len(labels)) # We make a copy of labels to avoid side-effects during iteration self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) def _iter_test_masks(self): for i in self.unique_labels: yield self.labels == i def __repr__(self): return '%s.%s(labels=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, ) def __len__(self): return self.n_unique_labels class LeavePLabelOut(_PartitionIterator): """Leave-P-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. p : int Number of samples to leave out in the test split. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = cross_validation.LeavePLabelOut(labels, p=2) >>> len(lpl) 3 >>> print(lpl) sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2) >>> for train_index, test_index in lpl: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] """ def __init__(self, labels, p): # We make a copy of labels to avoid side-effects during iteration super(LeavePLabelOut, self).__init__(len(labels)) self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) self.p = p def _iter_test_masks(self): comb = combinations(range(self.n_unique_labels), self.p) for idx in comb: test_index = self._empty_mask() idx = np.array(idx) for l in self.unique_labels[idx]: test_index[self.labels == l] = True yield test_index def __repr__(self): return '%s.%s(labels=%s, p=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, self.p, ) def __len__(self): return int(factorial(self.n_unique_labels) / factorial(self.n_unique_labels - self.p) / factorial(self.p)) class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n, n_iter=10, test_size=0.1, train_size=None, random_state=None): self.n = n self.n_iter = n_iter self.test_size = test_size self.train_size = train_size self.random_state = random_state self.n_train, self.n_test = _validate_shuffle_split(n, test_size, train_size) def __iter__(self): for train, test in self._iter_indices(): yield train, test return @abstractmethod def _iter_indices(self): """Generate (train, test) indices""" class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validation iterator. Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn import cross_validation >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... test_size=.25, random_state=0) >>> len(rs) 3 >>> print(rs) ... # doctest: +ELLIPSIS ShuffleSplit(4, n_iter=3, test_size=0.25, ...) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... train_size=0.5, test_size=.25, random_state=0) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... 
TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] """ def _iter_indices(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_test = permutation[:self.n_test] ind_train = permutation[self.n_test:self.n_test + self.n_train] yield ind_train, ind_test def __repr__(self): return ('%s(%d, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _validate_shuffle_split(n, test_size, train_size): if test_size is None and train_size is None: raise ValueError( 'test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind == 'i': if test_size >= n: raise ValueError( 'test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n)) else: raise ValueError("Invalid value for test_size: %r" % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError("train_size=%f should be smaller " "than 1.0 or be an integer" % train_size) elif np.asarray(test_size).dtype.kind == 'f' and \ train_size + test_size > 1.: raise ValueError('The sum of test_size and train_size = %f, ' 'should be smaller than 1.0. Reduce ' 'test_size and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind == 'i': if train_size >= n: raise ValueError("train_size=%d should be smaller " "than the number of samples %d" % (train_size, n)) else: raise ValueError("Invalid value for train_size: %r" % train_size) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n - n_test else: if np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n) else: n_train = float(train_size) if test_size is None: n_test = n - n_train if n_train + n_test > n: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n)) return int(n_train), int(n_test) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Parameters ---------- y : array, [n_samples] Labels of samples. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. 
If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.cross_validation import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0) >>> len(sss) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...) >>> for train_index, test_index in sss: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, y, n_iter=10, test_size=0.1, train_size=None, random_state=None): super(StratifiedShuffleSplit, self).__init__( len(y), n_iter, test_size, train_size, random_state) self.y = np.array(y) self.classes, self.y_indices = np.unique(y, return_inverse=True) n_cls = self.classes.shape[0] if np.min(bincount(self.y_indices)) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if self.n_train < n_cls: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_train, n_cls)) if self.n_test < n_cls: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_test, n_cls)) def _iter_indices(self): rng = check_random_state(self.random_state) cls_count = bincount(self.y_indices) p_i = cls_count / float(self.n) n_i = np.round(self.n_train * p_i).astype(int) t_i = np.minimum(cls_count - n_i, np.round(self.n_test * p_i).astype(int)) for n in range(self.n_iter): train = [] test = [] for i, cls in enumerate(self.classes): permutation = rng.permutation(cls_count[i]) cls_i = np.where((self.y == cls))[0][permutation] train.extend(cls_i[:n_i[i]]) test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]]) # Because of rounding issues (as n_train and n_test are not # dividers of the number of elements per class), we may end # up here with less samples in train and test than asked for. if len(train) < self.n_train or len(test) < self.n_test: # We complete by affecting randomly the missing indexes missing_idx = np.where(bincount(train + test, minlength=len(self.y)) == 0, )[0] missing_idx = rng.permutation(missing_idx) train.extend(missing_idx[:(self.n_train - len(train))]) test.extend(missing_idx[-(self.n_test - len(test)):]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.y, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter class PredefinedSplit(_PartitionIterator): """Predefined split cross validation iterator Splits the data into training/test set folds according to a predefined scheme. Each sample can be assigned to at most one test set fold, as specified by the user through the ``test_fold`` parameter. Parameters ---------- test_fold : "array-like, shape (n_samples,) test_fold[i] gives the test set fold of sample i. A value of -1 indicates that the corresponding sample is not part of any test set folds, but will instead always be put into the training fold. 
Examples -------- >>> from sklearn.cross_validation import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1]) >>> len(ps) 2 >>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1]) >>> for train_index, test_index in ps: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): super(PredefinedSplit, self).__init__(len(test_fold)) self.test_fold = np.array(test_fold, dtype=np.int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def _iter_test_indices(self): for f in self.unique_folds: yield np.where(self.test_fold == f)[0] def __repr__(self): return '%s.%s(test_fold=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.test_fold) def __len__(self): return len(self.unique_folds) ############################################################################## def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Generate cross-validated estimates for each input data point Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. This generator must include all elements in the test set exactly once. Otherwise, a ValueError is raised. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- preds : ndarray This is the result of calling 'predict' """ X, y = indexable(X, y) cv = _check_cv(cv, X, y, classifier=is_classifier(estimator)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y, train, test, verbose, fit_params) for train, test in cv) p = np.concatenate([p for p, _ in preds_blocks]) locs = np.concatenate([loc for _, loc in preds_blocks]) if not _check_is_partition(locs, X.shape[0]): raise ValueError('cross_val_predict only works for partitions') preds = p.copy() preds[locs] = p return preds def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params): """Fit estimator and predict values for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. Returns ------- preds : sequence Result of calling 'estimator.predict' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) preds = estimator.predict(X_test) return preds, test def _check_is_partition(locs, n): """Check whether locs is a reordering of the array np.arange(n) Parameters ---------- locs : ndarray integer array to test n : int number of expected elements Returns ------- is_partition : bool True iff sorted(locs) is range(n) """ if len(locs) != n: return False hit = np.zeros(n, bool) hit[locs] = True if not np.all(hit): return False return True def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. 
Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = _check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv) return np.array(scores)[:, 0] class FitFailedWarning(RuntimeWarning): pass def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)" ) else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." 
% (str(score), type(score))) return score def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return y[ind] def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, a cv generator instance, or None The input specifying which cv generator to use. It can be an integer, in which case it is the number of folds in a KFold, None, in which case 3 fold is used, or another object, that will then be used as a cv generator. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. """ return _check_cv(cv, X=X, y=y, classifier=classifier) def _check_cv(cv, X=None, y=None, classifier=False): # This exists for internal use while indices is being deprecated. is_sparse = sp.issparse(X) if cv is None: cv = 3 if isinstance(cv, numbers.Integral): if classifier: if type_of_target(y) in ['binary', 'multiclass']: cv = StratifiedKFold(y, cv) else: cv = KFold(_num_samples(y), cv) else: if not is_sparse: n_samples = len(X) else: n_samples = X.shape[0] cv = KFold(n_samples, cv) return cv def permutation_test_score(estimator, X, y, cv=None, n_permutations=100, n_jobs=1, labels=None, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. labels : array-like of shape [n_samples] (optional) Labels constrain the permutation among groups of samples with a same label. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. 
pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y = indexable(X, y) cv = _check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Parameters ---------- *arrays : sequence of arrays or scipy.sparse matrices with same shape[0] Python lists or tuples occurring in arrays are converted to 1D numpy arrays. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Returns ------- splitting : list of arrays, length=2 * len(arrays) List containing train-test split of input array. Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> a, b = np.arange(10).reshape((5, 2)), range(5) >>> a array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(b) [0, 1, 2, 3, 4] >>> a_train, a_test, b_train, b_test = train_test_split( ... a, b, test_size=0.33, random_state=42) ... 
>>> a_train array([[4, 5], [0, 1], [6, 7]]) >>> b_train [2, 0, 3] >>> a_test array([[2, 3], [8, 9]]) >>> b_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) dtype = options.pop('dtype', None) if dtype is not None: warnings.warn("dtype option is ignored and will be removed in 0.18.", DeprecationWarning) allow_nd = options.pop('allow_nd', None) allow_lists = options.pop('allow_lists', None) if allow_lists is not None: warnings.warn("The allow_lists option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if allow_nd is not None: warnings.warn("The allow_nd option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if allow_lists is False or allow_nd is False: arrays = [check_array(x, 'csr', allow_nd=allow_nd, force_all_finite=False, ensure_2d=False) if x is not None else x for x in arrays] if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests
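# ---------------------------------------------------------------------------
# Minimal usage sketch for the helpers defined above, assuming scikit-learn's
# bundled iris dataset and LogisticRegression estimator are available in this
# environment; it is an illustration, not part of the module's public tests.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    iris = load_iris()
    clf = LogisticRegression()

    # Single random hold-out split.
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.25, random_state=0)
    holdout_acc = clf.fit(X_train, y_train).score(X_test, y_test)
    print('hold-out accuracy: %0.3f' % holdout_acc)

    # 5-fold cross-validated scores (StratifiedKFold, since clf is a classifier).
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    print('cv accuracy: %0.3f +/- %0.3f' % (scores.mean(), scores.std()))

    # One out-of-fold prediction per sample.
    preds = cross_val_predict(clf, iris.data, iris.target, cv=5)
    oof_acc = (preds == iris.target).mean()
    print('out-of-fold accuracy: %0.3f' % oof_acc)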
bsd-3-clause
JuliaSprenger/python-neo
neo/utils/datasets.py
5
2427
""" Utility functions to retrieve public datasets. """ import os from pathlib import Path default_testing_repo = 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' global local_testing_data_folder if os.getenv('EPHY_TESTING_DATA_FOLDER', default=None) is not None: local_testing_data_folder = Path(os.getenv('EPHY_TESTING_DATA_FOLDER')) else: # set in home local_testing_data_folder = Path.home() / 'ephy_testing_data' def get_local_testing_data_folder(): global local_testing_data_folder return local_testing_data_folder def download_dataset(repo=default_testing_repo, remote_path=None, local_folder=None): """ Download a dataset with datalad client. By default it download the "NeuralEnsemble/ephy_testing_data" on gin platform which is used for neo testing. Usage: download_dataset( repo='https://gin.g-node.org/NeuralEnsemble/ephy_testing_data', remote_path='blackrock/blackrock_2_1', local_folder='/home/myname/Documents/') Parameters ---------- repo: str The url of the repo. If None then 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' is used remote_path: str of Path The distant path to retrieve (file or folder) local_folder: str or Path or None The local folder where to download the data. If None, a default project testing folder is used. Default: None Returns ------- local_path: The local path of the downloaded file or folder """ import datalad.api from datalad.support.gitrepo import GitRepo if local_folder is None: global local_testing_data_folder local_folder = local_testing_data_folder local_folder = Path(local_folder) if local_folder.exists() and GitRepo.is_valid_repo(local_folder): dataset = datalad.api.Dataset(path=local_folder) # make sure git repo is in clean state repo = dataset.repo repo.call_git(['checkout', '--force', 'master']) dataset.update(merge=True) else: dataset = datalad.api.install(path=local_folder, source=repo) if remote_path is None: print('Bad boy: you have to provide "remote_path"') return dataset.get(remote_path) local_path = local_folder / remote_path return local_path
bsd-3-clause
googlearchive/rgc-models
response_model/python/population_subunits/jitter/distributed/approximate_conv.py
1
25423
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """One-line documentation for approximate_conv module. A detailed description of approximate_conv. """ from datetime import datetime import time import sys import os.path import collections import tensorflow as tf from absl import app from absl import flags from absl import gfile from tensorflow.contrib.slim.model_deploy import DeploymentConfig, deploy from tensorflow.python.profiler.model_analyzer import PrintModelAnalysis import cPickle as pickle import matplotlib matplotlib.use('TkAgg') from matplotlib import pylab import matplotlib.pyplot as plt import numpy as np import scipy.io as sio from scipy import ndimage import retina.response_model.python.population_subunits.jitter.distributed.jitter_model as jitter_model import retina.response_model.python.population_subunits.jitter.distributed.get_data_mat as get_data_mat import random FLAGS = flags.FLAGS # flags for data locations flags.DEFINE_string('folder_name', 'experiment_jitter', 'folder where to store all the data') flags.DEFINE_string('save_location', '/home/bhaishahster/distributed3/', 'where to store logs and outputs?'); flags.DEFINE_string('data_location', '/home/retina_data/Google datasets/2016-04-21-1/data006(2016-04-21-1_data006_data006)/', 'where to take data from?') flags.DEFINE_integer('batchsz', 240*4, 'batch size for training') flags.DEFINE_integer('n_chunks',1793, 'number of data chunks') # should be 216 flags.DEFINE_integer('num_chunks_to_load', 2*6, 'number of chunks to load for 1 batch of data') flags.DEFINE_integer('train_len', 216 - 21, 'how much training length to use?') flags.DEFINE_float('step_sz', 20, 'step size for learning algorithm') flags.DEFINE_integer('max_steps', 400000, 'maximum number of steps') # random number generators initialized # removes unneccessary data variabilities while comparing algorithms flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed') flags.DEFINE_integer('randseed', 65, 'python RNG seed') # flags for model/loss specification flags.DEFINE_string('model_id', 'relu_window_mother_sfm', 'which model to fit') ## list of models here, and quick explanation flags.DEFINE_string('loss', 'poisson', 'which loss to use?') # poisson, (conditional poisson - TODO), logistic or hinge # model specific terms # useful for convolution-like models flags.DEFINE_string('architecture','1 layer', 'the architecture of model to be learnt') # options : 1 layer, 2 layer_stimulus (stimulus put to lower dimensions), # 2 layer_delta (two layered architecture of delta weights) # stimulus downsampling options - if architecture = '2 layer_stimulus', # then downsample stimulus with these windows and strides. 
flags.DEFINE_integer('stim_downsample_window', 4, 'How to down sample the stimulus') flags.DEFINE_integer('stim_downsample_stride',4, 'stride to use to downsample stimulus') # weight windows on stimulus for subunits flags.DEFINE_integer('window', 16, 'size of window for each subunit in relu_window model') flags.DEFINE_integer('stride', 16, 'stride for relu_window') flags.DEFINE_integer('su_channels', 3, 'number of color channels each subunit should take input from') # some models need regularization of parameters flags.DEFINE_float('lam_w', 0.000, 'sparsitiy regularization of w') flags.DEFINE_float('lam_a', 0.000, 'sparsitiy regularization of a') # dataset specific flags.DEFINE_float('n_cells',1, 'number of cells in the dataset') # distributed TF specific flags flags.DEFINE_string("master", "local", """BNS name of the TensorFlow master to use.""") flags.DEFINE_integer("task", 0, """Task id of the replica running the training.""") flags.DEFINE_integer("ps_tasks", 0, """Number of tasks in the ps job. If 0 no ps job is used.""") #flags.DEFINE_integer("is_eval", 0, """If this is eval worker""") # specs for multi-gpu training tf.app.flags.DEFINE_string('config_params', '', """Deployment config params.""") # parameters used for synchronous updating of gradients from multiple workers flags.DEFINE_boolean("sync_replicas", False, "Use the sync_replicas (synchronized replicas) mode, " "wherein the parameter updates from workers are aggregated " "before applied to avoid stale gradients") flags.DEFINE_integer("replicas_to_aggregate", None, "Number of replicas to aggregate before parameter update" "is applied (For sync_replicas mode only; default: " "num_workers)") # learn or analyze a particular fit? flags.DEFINE_integer("learn",1,"""If we learn the model or analyse it""") FLAGS = flags.FLAGS def main(argv): RunComputation() def RunComputation(): # filename for saving file if FLAGS.architecture == '2 layer_stimulus': architecture_string = ('_architecture=' + str(FLAGS.architecture) + '_stim_downsample_window=' + str(FLAGS.stim_downsample_window) + '_stim_downsample_stride=' + str(FLAGS.stim_downsample_stride)) else: architecture_string = ('_architecture=' + str(FLAGS.architecture)) short_filename = ('model=' + str(FLAGS.model_id) + '_loss='+ str(FLAGS.loss) + '_batch_sz='+ str(FLAGS.batchsz) + '_lam_w=' + str(FLAGS.lam_w) + '_step_sz'+ str(FLAGS.step_sz) + '_tlen=' + str(FLAGS.train_len) + '_window='+str(FLAGS.window) + '_stride='+str(FLAGS.stride) + str(architecture_string) + '_jitter') # make a folder with name derived from parameters of the algorithm - it saves checkpoint files and summaries used in tensorboard parent_folder = FLAGS.save_location + FLAGS.folder_name + '/' # make folder if it does not exist if not gfile.IsDirectory(parent_folder): gfile.MkDir(parent_folder) FLAGS.save_location = parent_folder + short_filename + '/' print('Does the file exist?', gfile.IsDirectory(FLAGS.save_location)) if not gfile.IsDirectory(FLAGS.save_location): gfile.MkDir(FLAGS.save_location) save_filename = FLAGS.save_location + short_filename """Main function which runs all TensorFlow computations.""" with tf.Graph().as_default() as gra: with tf.device(tf.ReplicaDeviceSetter(FLAGS.ps_tasks)): print(FLAGS.config_params) tf.logging.info(FLAGS.config_params) # set up training dataset tc_mean = get_data_mat.init_chunks(FLAGS.n_chunks) ''' # plot histogram of a training dataset stim_train, resp_train, train_len = get_data_mat.get_stim_resp('train', num_chunks=FLAGS.num_chunks_to_load) 
plt.hist(np.ndarray.flatten(stim_train[:,:,0:])) plt.show() plt.draw() ''' # Create computation graph. # # Graph should be fully constructed before you create supervisor. # Attempt to modify graph after supervisor is created will cause an error. with tf.name_scope('model'): if FLAGS.architecture == '1 layer': # single GPU model if False: global_step = tf.contrib.framework.create_global_step() model, stim, resp = jitter_model.approximate_conv_jitter(FLAGS.n_cells, FLAGS.lam_w, FLAGS.window, FLAGS.stride, FLAGS.step_sz, tc_mean, FLAGS.su_channels) # multiGPU model if True: model, stim, resp, global_step = jitter_model.approximate_conv_jitter_multigpu(FLAGS.n_cells, FLAGS.lam_w, FLAGS.window, FLAGS.stride, FLAGS.step_sz, tc_mean, FLAGS.su_channels, FLAGS.config_params) if FLAGS.architecture == '2 layer_stimulus': # stimulus is first smoothened to lower dimensions, then same model is applied print(' put stimulus to lower dimenstions!') model, stim, resp, global_step, stim_tuple = jitter_model.approximate_conv_jitter_multigpu_stim_lr(FLAGS.n_cells, FLAGS.lam_w, FLAGS.window, FLAGS.stride, FLAGS.step_sz, tc_mean, FLAGS.su_channels, FLAGS.config_params,FLAGS.stim_downsample_window,FLAGS.stim_downsample_stride) # Print the number of variables in graph print('Calculating model size') # Hope we do not exceed memory PrintModelAnalysis(gra, max_depth=10) #import pdb; pdb.set_trace() # Builds our summary op. summary_op = model.merged_summary # Create a Supervisor. It will take care of initialization, summaries, # checkpoints, and recovery. # # When multiple replicas of this program are running, the first one, # identified by --task=0 is the 'chief' supervisor. It is the only one # that takes case of initialization, etc. is_chief = (FLAGS.task == 0) # & (FLAGS.learn==1) print(save_filename) if FLAGS.learn==1: # use supervisor only for learning, # otherwise it messes up data as it tries to store variables while you are doing analysis sv = tf.train.Supervisor(logdir=save_filename, is_chief=is_chief, saver=tf.train.Saver(), summary_op=None, save_model_secs=100, global_step=global_step, recovery_wait_secs=5) if (is_chief and FLAGS.learn==1): tf.train.write_graph(tf.get_default_graph().as_graph_def(), save_filename, 'graph.pbtxt') # Get an initialized, and possibly recovered session. Launch the # services: Checkpointing, Summaries, step counting. # # When multiple replicas of this program are running the services are # only launched by the 'chief' replica. 
session_config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False) #import pdb; pdb.set_trace() sess = sv.PrepareSession(FLAGS.master, config=session_config) FitComputation(sv, sess, model, stim, resp, global_step, summary_op, stim_tuple) sv.Stop() else: # if not learn, then analyse session_config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False) with tf.Session(config=session_config) as sess: saver_var = tf.train.Saver(tf.all_variables(), keep_checkpoint_every_n_hours=float('inf')) restore_file = tf.train.latest_checkpoint(save_filename) print(restore_file) start_iter = int(restore_file.split('/')[-1].split('-')[-1]) saver_var.restore(sess, restore_file) if FLAGS.architecture == '2 layer_stimulus': AnalyseModel_lr(sess, model) else: AnalyseModel(sv, sess, model) def FitComputation(sv, sess, model, stim, resp, global_step, summary_op, stim_tuple): def Test(): # load a test chunk one at a time and compute average log-likelihood loss_batch = 0 n_test_chunks = 2 # should be 8 #len(get_data_mat.test_chunks) for ichunk in range(n_test_chunks): if get_data_mat.test_counter >=n_test_chunks: get_data_mat.test_counter = 0 stim_test, resp_test, test_len = get_data_mat.get_stim_resp(data_type='test') fd_test = {stim: np.array(stim_test,dtype='float32'), resp: np.array(resp_test,dtype='float32')} loss_batch += sess.run(model.loss_inter, feed_dict=fd_test) print_loss = loss_batch / n_test_chunks print('Test loss:%.3f' % print_loss) return print_loss # Run iterative computation in a loop. step = sess.run(global_step) is_chief = (FLAGS.task == 0) loss_avg = [] while not sv.ShouldStop(): #print('starting step') start_time = time.time() stim_train, resp_train, train_len = get_data_mat.get_stim_resp('train', num_chunks=FLAGS.num_chunks_to_load) duration = time.time() - start_time format_str = ('%s: get_data @ step %d, %.3f ' 'sec/batch)') tf.logging.info(format_str % (datetime.now(), step, duration)) print(format_str % (datetime.now(), step, duration)) #print(resp) #import pdb; pdb.set_trace() fd_train = {stim: np.array(stim_train,dtype='float32'), resp: np.array(resp_train,dtype='float32')} #print('made dict') #print('running step') start_time = time.time() _, current_loss = sess.run([model.train_step, model.loss_inter], feed_dict=fd_train) #_ = sess.run([model.train_step], feed_dict=fd_train) #current_loss = 0 loss_avg.append(current_loss) duration = time.time() - start_time format_str = ('%s: train @ step %d, %.3f ' 'sec/batch) loss = %.3f') tf.logging.info(format_str % (datetime.now(), step, duration, np.mean(np.array(loss_avg)))) print(format_str % (datetime.now(), step, duration, np.mean(np.array(loss_avg)))) if len(loss_avg) > 10: loss_avg = loss_avg[1:] #print('finished running step.') ''' from IPython.terminal.embed import InteractiveShellEmbed ipshell = InteractiveShellEmbed() ipshell() ''' if step >= FLAGS.max_steps: break if is_chief and step % 10 == 0: mean_loss = np.mean(np.array(loss_avg)) #print('making summary') start_time = time.time() summary_str = sess.run(summary_op, feed_dict=fd_train) sv.summary_computed(sess, summary_str) duration = time.time() - start_time format_str = ('%s: summary @ step %d, %.3f ' 'sec/batch), loss: %.3f') #tf.logging.info(format_str % (datetime.now(), step, duration, loss_inter_summary)) #print(format_str % (datetime.now(), step, duration, mean_loss)) loss_avg = [] # Test data loss test_loss = Test() ''' test_summary = tf.Summary() value = test_summary.value.add() value.tag = 'Test loss' value.simple_value = test_loss 
print('Test loss %.3f' % value.simple_value) sv.summary_computed(sess, test_summary) #print('adding summary') ''' step += 1 #print('ending step') def AnalyseModel(sv, sess, model): # analyse a 1 layered almost-convolutional model print('starting analysis') # get mother cell and print it w_fit_mother = sess.run(model.variables.w_mother) print(np.shape(w_fit_mother)) for ichannel in range(3): plt.subplot(1,3,ichannel+1) print(np.squeeze(w_fit_mother[:,:,ichannel,0])) plt.imshow(np.squeeze(w_fit_mother[:,:,ichannel,0]), cmap='gray') plt.draw() plt.show() # print a_sfm ''' a_sfm_eval = sess.run(model.ops.a_sfm) plt.plot(a_sfm_eval) print(np.shape(a_sfm_eval)) print(np.sum(a_sfm_eval ,0)) plt.show() plt.draw() ''' # plot delta subunit for 'almost convolutional - model + delta models' w_del_e = np.squeeze(sess.run(model.variables.w_del)) w_mot = sess.run(model.variables.w_mother) dimx = model.dimensions.dimx dimy = model.dimensions.dimy print(dimx, dimy) for icol in np.arange(3): icnt=1 for idimx in np.arange(dimx): print(idimx) for idimy in np.arange(dimy): w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :]) plt.subplot(dimx, dimy, icnt) wts = w_del_flatteni wh = 2*FLAGS.window+1 wts = np.reshape(wts[wh*wh*icol:wh*wh*(icol+1)],(wh,wh)) plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:,:,icol])),cmap='gray') icnt=icnt+1 plt.axis('off') print(icol) plt.draw() plt.show() # plot subunits for a chosen cell w_del_e = np.squeeze(sess.run(model.variables.w_del)) w_mot = sess.run(model.variables.w_mother) a_sfm_eval = sess.run(model.ops.a_sfm) icell = 20 icol = 1 # 1 for green a_wts = a_sfm_eval[:,icell] a_thr = np.percentile(a_wts, 99) sus = np.arange(a_sfm_eval.shape[0]) chosen_su = sus[a_wts > a_thr] wh = 2 * FLAGS.window + 1 dimx = model.dimensions.dimx dimy = model.dimensions.dimy icnt=-1 isu = 0 print(chosen_su) for idimx in np.arange(dimx): print(idimx) for idimy in np.arange(dimy): icnt=icnt+1 if(a_wts[icnt]>=a_thr): print(icnt) # plot this subunit # compute 2D subunit w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :]) wts = w_del_flatteni wts = np.reshape(wts[wh * wh * icol:wh * wh * (icol + 1)], (wh, wh)) isu = isu + 1 print(isu) plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 1) plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:, :, icol])), cmap='gray') plt.title(str(a_wts[icnt])) plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 2) plt.imshow(np.squeeze(wts),cmap='gray') plt.title(str(a_wts[icnt])) plt.show() plt.draw() def AnalyseModel_lr(sess, model): # analyse a 1 layered almost-convolutional model print('starting analysis') # get mother cell and print it w_fit_mother = sess.run(model.variables_lr.w_mother) print(np.shape(w_fit_mother)) for ichannel in range(1): plt.subplot(1,1,ichannel+1) print(np.squeeze(w_fit_mother[:,:,ichannel,0])) plt.imshow(np.squeeze(w_fit_mother[:,:,ichannel,0]), cmap='gray', interpolation='nearest') plt.title('Mother subunit') plt.draw() plt.show() # see how the stimulus is put to lower dimensions - plot w_stim_lr w_fit_stim_lr = sess.run(model.variables_lr.w_stim_lr) print(np.shape(w_fit_stim_lr)) for ichannel in range(3): plt.subplot(1,3,ichannel+1) print(np.squeeze(w_fit_stim_lr[:,:,ichannel,0])) plt.imshow(np.squeeze(w_fit_stim_lr[:,:,ichannel,0]), cmap='gray', interpolation='nearest') plt.title('w_stimlr: putting stimulus to lower dimensions') plt.draw() plt.show() # plot delta subunit for 'almost convolutional - model + delta models' ''' w_del_e = np.squeeze(sess.run(model.variables_lr.w_del)) w_mot = sess.run(model.variables_lr.w_mother) dimx = 
model.dimensions.dimx dimy = model.dimensions.dimy print(dimx, dimy) for icol in np.arange(1): icnt=1 for idimx in np.arange(dimx): print(idimx) for idimy in np.arange(dimy): w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :]) plt.subplot(dimx, dimy, icnt) #plt.subplot(6,6,icnt) wts = w_del_flatteni wh = 2*FLAGS.window+1 wts = np.reshape(wts[wh*wh*icol:wh*wh*(icol+1)],(wh,wh)) plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:,:,icol])),cmap='gray', interpolation='nearest') icnt=icnt+1 plt.suptitle('w mother + w delta') print(icol) plt.draw() plt.show() ''' # plot subunits for a chosen cell w_del_e = np.squeeze(sess.run(model.variables_lr.w_del)) w_mot = sess.run(model.variables_lr.w_mother) a_model = sess.run(model.variables_lr.a) a_sfm_eval = a_model icell = 35 icol = 0 # 1 for green a_wts = a_sfm_eval[:,icell] a_thr = np.percentile(a_wts, 99.9) sus = np.arange(a_sfm_eval.shape[0]) chosen_su = sus[a_wts > a_thr] wh = 2 * FLAGS.window + 1 dimx = model.dimensions.dimx dimy = model.dimensions.dimy icnt=-1 isu = 0 print(chosen_su) for idimx in np.arange(dimx): print(idimx) for idimy in np.arange(dimy): icnt=icnt+1 if(a_wts[icnt]>=a_thr): good_sux=idimx good_suy=idimy print(icnt, idimx, idimy, a_wts[icnt]) # plot this subunit # compute 2D subunit w_del_flatteni = np.squeeze(w_del_e[idimx, idimy, :]) wts = w_del_flatteni wts = np.reshape(wts[wh * wh * icol:wh * wh * (icol + 1)], (wh, wh)) isu = isu + 1 print(isu) ax=plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 1) plt.imshow(np.squeeze(wts + np.squeeze(w_mot[:, :, icol])), cmap='gray', interpolation='nearest') #plt.title(str(a_wts[icnt])) ax.set_xticklabels([]) ax.set_yticklabels([]) ax=plt.subplot(len(chosen_su), 2, (isu - 1) * 2 + 2) plt.imshow(np.squeeze(wts),cmap='gray', interpolation='nearest') #plt.title(str(a_wts[icnt])) ax.set_xticklabels([]) ax.set_yticklabels([]) plt.show() plt.draw() ## Decode stimulus for each subunit print('Starting decode') w_stim_lr_fit = sess.run(model.variables_lr.w_stim_lr) w_mother_fit = sess.run(model.variables_lr.w_mother) a_fit = sess.run(model.variables_lr.a) w_del_fit = sess.run(model.variables_lr.w_del) bias_su_fit = sess.run(model.variables_lr.bias_su) bias_cell_fit = sess.run(model.variables_lr.bias_cell) tcmean_fit = sess.run(model.variables_lr.time_course) sux = good_sux g = tf.Graph() with g.as_default(): with tf.Session() as sess2: for suy in [good_suy]:#np.arange(30): # plot a a = tf.constant(np.array(a_fit, dtype='float32')) a_sfm = tf.transpose(tf.nn.softmax(tf.transpose(a))) a_sfm_expanded = tf.expand_dims(a_sfm, 0) a_sfm_expanded = tf.expand_dims(a_sfm_expanded, -1) a_sfm_np = sess2.run(a_sfm_expanded) plt.imshow(np.squeeze(a_sfm_np), cmap='gray', interpolation='nearest') plt.show() plt.draw() # maximize stimulus for a particular su vars_lst = jitter_model.variables_lr(w_mother_fit, w_del_fit, a_fit, w_stim_lr_fit,bias_su_fit ,bias_cell_fit, tcmean_fit) np.random.seed(11111) stim4D = tf.Variable(np.array(np.random.randn(1,640,320,3), dtype='float32'), name="decoded_stimulus") decode_fcn = jitter_model.decode_op_stim_lr(sess2, stim4D, sux, suy, vars_lst, FLAGS.window, FLAGS.stride, FLAGS.stim_downsample_window, FLAGS.stim_downsample_stride, model.dimensions_stimlr.dimx_slr, model.dimensions_stimlr.dimy_slr, model.dimensions.dimx, model.dimensions.dimy, FLAGS.n_cells) stim_decode, max_val = decode_fcn() print(np.shape(stim_decode)) icol =0 plt.subplot(1,2,1) plt.imshow(np.squeeze(stim_decode[0,:,:,icol]), cmap='gray', interpolation='nearest') xx = np.squeeze(stim_decode[0,:,:,icol]) rc = 
np.nonzero(xx>0.8*np.max(np.ndarray.flatten(xx))) xxy = xx[np.min(rc[0]):np.max(rc[0]), np.min(rc[1]):np.max(rc[1])] plt.subplot(1,2,2) plt.imshow(xxy, cmap='gray', interpolation='nearest') plt.title('Max val: '+ str(max_val)) plt.show() plt.draw() # maximize stimulus for a particular cell for mcellid in [20]:#np.arange(49): # which cell ID to plot np.random.seed(11111) stim4D = tf.Variable(np.array(np.random.randn(1,640,320,3), dtype='float32'), name="decoded_stimulus") decode_fcn = jitter_model.decode_op_stim_lr(sess2, stim4D, mcellid, -1, vars_lst, FLAGS.window, FLAGS.stride, FLAGS.stim_downsample_window, FLAGS.stim_downsample_stride, model.dimensions_stimlr.dimx_slr, model.dimensions_stimlr.dimy_slr, model.dimensions.dimx, model.dimensions.dimy, FLAGS.n_cells, max_element='cell') stim_decode, max_val = decode_fcn() print(np.shape(stim_decode)) icol =1 #plt.subplot(7, 7, mcellid+1); plt.imshow(np.squeeze(stim_decode[0, :, :, icol]), cmap='gray', interpolation='nearest') plt.show() plt.draw() ''' from IPython.terminal.embed import InteractiveShellEmbed ipshell = InteractiveShellEmbed() ipshell() ''' if __name__ == '__main__': app.run()
apache-2.0
YihaoLu/statsmodels
statsmodels/datasets/grunfeld/data.py
24
2794
"""Grunfeld (1950) Investment Data""" __docformat__ = 'restructuredtext' COPYRIGHT = """This is public domain.""" TITLE = __doc__ SOURCE = """This is the Grunfeld (1950) Investment Data. The source for the data was the original 11-firm data set from Grunfeld's Ph.D. thesis recreated by Kleiber and Zeileis (2008) "The Grunfeld Data at 50". The data can be found here. http://statmath.wu-wien.ac.at/~zeileis/grunfeld/ For a note on the many versions of the Grunfeld data circulating see: http://www.stanford.edu/~clint/bench/grunfeld.htm """ DESCRSHORT = """Grunfeld (1950) Investment Data for 11 U.S. Firms.""" DESCRLONG = DESCRSHORT NOTE = """:: Number of observations - 220 (20 years for 11 firms) Number of variables - 5 Variables name definitions:: invest - Gross investment in 1947 dollars value - Market value as of Dec. 31 in 1947 dollars capital - Stock of plant and equipment in 1947 dollars firm - General Motors, US Steel, General Electric, Chrysler, Atlantic Refining, IBM, Union Oil, Westinghouse, Goodyear, Diamond Match, American Steel year - 1935 - 1954 Note that raw_data has firm expanded to dummy variables, since it is a string categorical variable. """ from numpy import recfromtxt, column_stack, array from statsmodels.datasets import utils as du from os.path import dirname, abspath def load(): """ Loads the Grunfeld data and returns a Dataset class. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. Notes ----- raw_data has the firm variable expanded to dummy variables for each firm (ie., there is no reference dummy) """ from statsmodels.tools import categorical data = _get_data() raw_data = categorical(data, col='firm', drop=True) ds = du.process_recarray(data, endog_idx=0, stack=False) ds.raw_data = raw_data return ds def load_pandas(): """ Loads the Grunfeld data and returns a Dataset class. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. Notes ----- raw_data has the firm variable expanded to dummy variables for each firm (ie., there is no reference dummy) """ from pandas import DataFrame from statsmodels.tools import categorical data = _get_data() raw_data = categorical(data, col='firm', drop=True) ds = du.process_recarray_pandas(data, endog_idx=0) ds.raw_data = DataFrame(raw_data) return ds def _get_data(): filepath = dirname(abspath(__file__)) data = recfromtxt(open(filepath + '/grunfeld.csv','rb'), delimiter=",", names=True, dtype="f8,f8,f8,a17,f8") return data
bsd-3-clause
huangziwei/pyMF3
pymf3/factorization/_convexnmf.py
1
2948
import numpy as np
import logging

from sklearn.cluster import KMeans

from ._base import Base

__all__ = ['convexNMF']


class convexNMF(Base):
    '''
    Convex-Nonnegative Matrix Factorization.

    Reference
    =========
    Ding, C., Li, T., & Jordan, M. I. (2010). Convex and semi-nonnegative
    matrix factorizations. Pattern Analysis and Machine Intelligence,
    IEEE Transactions on, 32(1), 45–55.
    '''

    def __init__(self, data, W=None, H=None, L=None, num_bases=2,
                 filter_dims=(6,6), transpose=False, **kwargs):
        super(convexNMF, self).__init__(data, num_bases=num_bases, W=W, H=H,
                                        filter_dims=filter_dims,
                                        transpose=transpose)
        self.L = L

    def _cost_function(self):
        # Frobenius norm of the reconstruction residual data - W H.
        cost = np.sqrt(np.sum((self.data[:, :] - np.dot(self.W, self.H)) ** 2))
        return cost

    def _init_W(self):
        # Initialize W and L from a k-means clustering of the data rows.
        kmeans = KMeans(n_clusters=self._num_bases, random_state=0).fit(self.data)
        labels = kmeans.labels_
        W = np.zeros([self.ndims[0], self._num_bases])
        for i in range(self.ndims[0]):
            for k in range(self._num_bases):
                if labels[i] == k:
                    W[i, k] = 1
                else:
                    W[i, k] = 0
        self.W = W + 0.2
        n = [np.sum(kmeans.labels_ == i) for i in range(self._num_bases)]
        try:
            self.L = np.dot(W, np.linalg.inv(np.diag(n)))
        except np.linalg.linalg.LinAlgError:
            self.L = np.dot(W, np.linalg.pinv(np.diag(n)))

    def _update_W(self):
        # Multiplicative update of W using the positive/negative parts of V V^T.
        def pos(A):
            return (np.abs(A) + A) / 2

        def neg(A):
            return (np.abs(A) - A) / 2

        VVt = np.dot(self.data, self.data.T)
        WLt = np.dot(self.W, self.L.T)
        W1 = np.dot(pos(VVt), self.L) + np.dot(np.dot(WLt, neg(VVt)), self.L)
        W2 = np.dot(neg(VVt), self.L) + np.dot(np.dot(WLt, pos(VVt)), self.L)
        W1[W1 == 0] = 1e-16
        W2[W2 == 0] = 1e-16
        self.W *= np.sqrt(W1 / W2)

        if not self._transpose:
            for i in range(self.W.shape[1]):
                self.W[:, i] /= np.linalg.norm(self.W[:, i])

    def _update_H(self):
        # Multiplicative update of L; H is then recomputed as L^T V.
        def pos(A):
            return (np.abs(A) + A) / 2

        def neg(A):
            return (np.abs(A) - A) / 2

        VVt = np.dot(self.data, self.data.T)
        WtW = np.dot(self.W.T, self.W)
        L1 = np.dot(pos(VVt), self.W) + np.dot(np.dot(neg(VVt), self.L), WtW)
        L2 = np.dot(neg(VVt), self.W) + np.dot(np.dot(pos(VVt), self.L), WtW)
        L1[L1 == 0] = 1e-16
        L2[L2 == 0] = 1e-16
        self.L *= np.sqrt(L1 / L2)
        self.H = np.dot(self.L.T, self.data)

        if self._transpose:
            for i in range(self.H.shape[0]):
                self.H[i] /= np.linalg.norm(self.H[i])
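# ---------------------------------------------------------------------------
# Minimal usage sketch under stated assumptions: it presumes the Base class in
# ._base stores the constructor arguments as self.data, self.ndims (the data
# shape), self._num_bases and self._transpose, which is what the methods above
# require, and it drives the multiplicative updates directly rather than
# through any factorize() helper Base may provide. It sticks to the default
# W=H=L=None initialization. Run it as a module (python -m
# pymf3.factorization._convexnmf) so the relative import above resolves.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    V = np.abs(rng.randn(50, 30))      # toy nonnegative data matrix

    mdl = convexNMF(V, num_bases=3)
    mdl._init_W()                      # k-means based initialization of W and L
    for _ in range(100):               # alternate the two update rules
        mdl._update_W()
        mdl._update_H()
    print('reconstruction error: %.4f' % mdl._cost_function())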
mit