column     type           range
repo_name  stringlengths  6 to 103
path       stringlengths  4 to 209
copies     stringlengths  1 to 4
size       stringlengths  4 to 7
content    stringlengths  838 to 1.04M
license    stringclasses  15 values
pytorch/fairseq
examples/simultaneous_translation/eval/agents/simul_t2t_enja.py
1
7099
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from fairseq import checkpoint_utils, tasks import sentencepiece as spm import torch try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS from simuleval.agents import TextAgent except ImportError: print("Please install simuleval 'pip install simuleval'") BOS_PREFIX = "\u2581" class SimulTransTextAgentJA(TextAgent): """ Simultaneous Translation Text agent for Japanese """ def __init__(self, args): # Whether use gpu self.gpu = getattr(args, "gpu", False) # Max len self.max_len = args.max_len # Load Model self.load_model_vocab(args) # build word splitter self.build_word_splitter(args) self.eos = DEFAULT_EOS def initialize_states(self, states): states.incremental_states = dict() states.incremental_states["online"] = dict() def to_device(self, tensor): if self.gpu: return tensor.cuda() else: return tensor.cpu() def load_model_vocab(self, args): filename = args.model_path if not os.path.exists(filename): raise IOError("Model file not found: {}".format(filename)) state = checkpoint_utils.load_checkpoint_to_cpu(filename) task_args = state["cfg"]["task"] task_args.data = args.data_bin task = tasks.setup_task(task_args) # build model for ensemble state["cfg"]["model"].load_pretrained_encoder_from = None state["cfg"]["model"].load_pretrained_decoder_from = None self.model = task.build_model(state["cfg"]["model"]) self.model.load_state_dict(state["model"], strict=True) self.model.eval() self.model.share_memory() if self.gpu: self.model.cuda() # Set dictionary self.dict = {} self.dict["tgt"] = task.target_dictionary self.dict["src"] = task.source_dictionary @staticmethod def add_args(parser): # fmt: off parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.') parser.add_argument("--data-bin", type=str, required=True, help="Path of data binary") parser.add_argument("--max-len", type=int, default=100, help="Max length of translation") parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for target text.") parser.add_argument("--tgt-splitter-path", type=str, default=None, help="Subword splitter model path for target text.") parser.add_argument("--src-splitter-type", type=str, default="SentencePiece", help="Subword splitter type for source text.") parser.add_argument("--src-splitter-path", type=str, default=None, help="Subword splitter model path for source text.") # fmt: on return parser def build_word_splitter(self, args): self.spm = {} for lang in ['src', 'tgt']: if getattr(args, f'{lang}_splitter_type', None): path = getattr(args, f'{lang}_splitter_path', None) if path: self.spm[lang] = spm.SentencePieceProcessor() self.spm[lang].Load(path) def segment_to_units(self, segment, states): # Split a full word (segment) into subwords (units) return self.spm['src'].EncodeAsPieces(segment) def update_model_encoder(self, states): if len(states.units.source) == 0: return src_indices = [ self.dict['src'].index(x) for x in states.units.source.value ] if states.finish_read(): # Append the eos index when the prediction is over src_indices += [self.dict["tgt"].eos_index] src_indices = self.to_device( torch.LongTensor(src_indices).unsqueeze(0) ) src_lengths = self.to_device( torch.LongTensor([src_indices.size(1)]) ) states.encoder_states = self.model.encoder(src_indices, src_lengths) torch.cuda.empty_cache() def 
update_states_read(self, states): # Happens after a read action. self.update_model_encoder(states) def units_to_segment(self, units, states): # Merge sub words (units) to full word (segment). # For Japanese, we can directly send # the untokenized token to server except the BOS token # with following option # --sacrebleu-tokenizer MeCab # --eval-latency-unit char # --no-space token = units.value.pop() if ( token == self.dict["tgt"].eos_word or len(states.segments.target) > self.max_len ): return DEFAULT_EOS if BOS_PREFIX == token: return None if token[0] == BOS_PREFIX: return token[1:] else: return token def policy(self, states): if not getattr(states, "encoder_states", None): # No encoder states, read a token first return READ_ACTION # encode previous predicted target tokens tgt_indices = self.to_device( torch.LongTensor( [self.model.decoder.dictionary.eos()] + [ self.dict['tgt'].index(x) for x in states.units.target.value if x is not None ] ).unsqueeze(0) ) # Current steps states.incremental_states["steps"] = { "src": states.encoder_states["encoder_out"][0].size(0), "tgt": 1 + len(states.units.target), } # Online only means the reading is not finished states.incremental_states["online"]["only"] = ( torch.BoolTensor([not states.finish_read()]) ) x, outputs = self.model.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=states.encoder_states, incremental_state=states.incremental_states, ) states.decoder_out = x torch.cuda.empty_cache() if outputs.action == 0: return READ_ACTION else: return WRITE_ACTION def predict(self, states): # Predict target token from decoder states decoder_states = states.decoder_out lprobs = self.model.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1)[0, 0].item() if index != self.dict['tgt'].eos_index: token = self.dict['tgt'].string([index]) else: token = self.dict['tgt'].eos_word return token
mit
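Note on the record above: units_to_segment merges SentencePiece subwords back into surface tokens by looking at the "\u2581" word-boundary marker. The following is a minimal standalone sketch of just that prefix handling (a hypothetical helper, not part of fairseq or simuleval):

# Hypothetical helper mirroring the BOS-prefix logic in units_to_segment above.
BOS_PREFIX = "\u2581"  # SentencePiece word-boundary marker

def piece_to_segment(token: str):
    """Strip the marker from a subword piece; a bare marker carries no text."""
    if token == BOS_PREFIX:
        return None
    if token.startswith(BOS_PREFIX):
        return token[1:]
    return token

assert piece_to_segment("\u2581") is None
assert piece_to_segment("\u2581" + "日本") == "日本"
assert piece_to_segment("です") == "です"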
pytorch/fairseq
examples/MMPT/mmpt/processors/models/s3dg.py
1
12416
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Contains a PyTorch definition for Gated Separable 3D network (S3D-G) with a text module for computing joint text-video embedding from raw text and video input. The following code will enable you to load the HowTo100M pretrained S3D Text-Video model from: A. Miech, J.-B. Alayrac, L. Smaira, I. Laptev, J. Sivic and A. Zisserman, End-to-End Learning of Visual Representations from Uncurated Instructional Videos. https://arxiv.org/abs/1912.06430. S3D-G was proposed by: S. Xie, C. Sun, J. Huang, Z. Tu and K. Murphy, Rethinking Spatiotemporal Feature Learning For Video Understanding. https://arxiv.org/abs/1712.04851. Tensorflow code: https://github.com/tensorflow/models/blob/master/research/slim/nets/s3dg.py The S3D architecture was slightly modified with a space to depth trick for TPU optimization. """ import torch as th import torch.nn.functional as F import torch.nn as nn import os import numpy as np import re class InceptionBlock(nn.Module): def __init__( self, input_dim, num_outputs_0_0a, num_outputs_1_0a, num_outputs_1_0b, num_outputs_2_0a, num_outputs_2_0b, num_outputs_3_0b, gating=True, ): super(InceptionBlock, self).__init__() self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1]) self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1]) self.conv_b1_b = STConv3D( num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True ) self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1]) self.conv_b2_b = STConv3D( num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True ) self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1) self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1]) self.gating = gating self.output_dim = ( num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b ) if gating: self.gating_b0 = SelfGating(num_outputs_0_0a) self.gating_b1 = SelfGating(num_outputs_1_0b) self.gating_b2 = SelfGating(num_outputs_2_0b) self.gating_b3 = SelfGating(num_outputs_3_0b) def forward(self, input): """Inception block """ b0 = self.conv_b0(input) b1 = self.conv_b1_a(input) b1 = self.conv_b1_b(b1) b2 = self.conv_b2_a(input) b2 = self.conv_b2_b(b2) b3 = self.maxpool_b3(input) b3 = self.conv_b3_b(b3) if self.gating: b0 = self.gating_b0(b0) b1 = self.gating_b1(b1) b2 = self.gating_b2(b2) b3 = self.gating_b3(b3) return th.cat((b0, b1, b2, b3), dim=1) class SelfGating(nn.Module): def __init__(self, input_dim): super(SelfGating, self).__init__() self.fc = nn.Linear(input_dim, input_dim) def forward(self, input_tensor): """Feature gating as used in S3D-G. 
""" spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4]) weights = self.fc(spatiotemporal_average) weights = th.sigmoid(weights) return weights[:, :, None, None, None] * input_tensor class STConv3D(nn.Module): def __init__( self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False ): super(STConv3D, self).__init__() self.separable = separable self.relu = nn.ReLU(inplace=True) assert len(kernel_size) == 3 if separable and kernel_size[0] != 1: spatial_kernel_size = [1, kernel_size[1], kernel_size[2]] temporal_kernel_size = [kernel_size[0], 1, 1] if isinstance(stride, list) and len(stride) == 3: spatial_stride = [1, stride[1], stride[2]] temporal_stride = [stride[0], 1, 1] else: spatial_stride = [1, stride, stride] temporal_stride = [stride, 1, 1] if isinstance(padding, list) and len(padding) == 3: spatial_padding = [0, padding[1], padding[2]] temporal_padding = [padding[0], 0, 0] else: spatial_padding = [0, padding, padding] temporal_padding = [padding, 0, 0] if separable: self.conv1 = nn.Conv3d( input_dim, output_dim, kernel_size=spatial_kernel_size, stride=spatial_stride, padding=spatial_padding, bias=False, ) self.bn1 = nn.BatchNorm3d(output_dim) self.conv2 = nn.Conv3d( output_dim, output_dim, kernel_size=temporal_kernel_size, stride=temporal_stride, padding=temporal_padding, bias=False, ) self.bn2 = nn.BatchNorm3d(output_dim) else: self.conv1 = nn.Conv3d( input_dim, output_dim, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, ) self.bn1 = nn.BatchNorm3d(output_dim) def forward(self, input): out = self.relu(self.bn1(self.conv1(input))) if self.separable: out = self.relu(self.bn2(self.conv2(out))) return out class MaxPool3dTFPadding(th.nn.Module): def __init__(self, kernel_size, stride=None, padding="SAME"): super(MaxPool3dTFPadding, self).__init__() if padding == "SAME": padding_shape = self._get_padding_shape(kernel_size, stride) self.padding_shape = padding_shape self.pad = th.nn.ConstantPad3d(padding_shape, 0) self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True) def _get_padding_shape(self, filter_shape, stride): def _pad_top_bottom(filter_dim, stride_val): pad_along = max(filter_dim - stride_val, 0) pad_top = pad_along // 2 pad_bottom = pad_along - pad_top return pad_top, pad_bottom padding_shape = [] for filter_dim, stride_val in zip(filter_shape, stride): pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val) padding_shape.append(pad_top) padding_shape.append(pad_bottom) depth_top = padding_shape.pop(0) depth_bottom = padding_shape.pop(0) padding_shape.append(depth_top) padding_shape.append(depth_bottom) return tuple(padding_shape) def forward(self, inp): inp = self.pad(inp) out = self.pool(inp) return out class Sentence_Embedding(nn.Module): def __init__( self, embd_dim, num_embeddings=66250, word_embedding_dim=300, token_to_word_path="dict.npy", max_words=16, output_dim=2048, ): super(Sentence_Embedding, self).__init__() self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim) self.fc1 = nn.Linear(word_embedding_dim, output_dim) self.fc2 = nn.Linear(output_dim, embd_dim) self.word_to_token = {} self.max_words = max_words token_to_word = np.load(token_to_word_path) for i, t in enumerate(token_to_word): self.word_to_token[t] = i + 1 def _zero_pad_tensor_token(self, tensor, size): if len(tensor) >= size: return tensor[:size] else: zero = th.zeros(size - len(tensor)).long() return th.cat((tensor, zero), dim=0) def _split_text(self, sentence): w = re.findall(r"[\w']+", str(sentence)) return w def 
_words_to_token(self, words): words = [ self.word_to_token[word] for word in words if word in self.word_to_token ] if words: we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words) return we else: return th.zeros(self.max_words).long() def _words_to_ids(self, x): split_x = [self._words_to_token(self._split_text(sent.lower())) for sent in x] return th.stack(split_x, dim=0) def forward(self, x): x = self._words_to_ids(x) x = self.word_embd(x) x = F.relu(self.fc1(x)) x = th.max(x, dim=1)[0] x = self.fc2(x) return {'text_embedding': x} class S3D(nn.Module): def __init__(self, dict_path, num_classes=512, gating=True, space_to_depth=True): super(S3D, self).__init__() self.num_classes = num_classes self.gating = gating self.space_to_depth = space_to_depth if space_to_depth: self.conv1 = STConv3D( 24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False ) else: self.conv1 = STConv3D( 3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False ) self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False) self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True) self.gating = SelfGating(192) self.maxpool_2a = MaxPool3dTFPadding( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME" ) self.maxpool_3a = MaxPool3dTFPadding( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME" ) self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32) self.mixed_3c = InceptionBlock( self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64 ) self.maxpool_4a = MaxPool3dTFPadding( kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME" ) self.mixed_4b = InceptionBlock( self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64 ) self.mixed_4c = InceptionBlock( self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64 ) self.mixed_4d = InceptionBlock( self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64 ) self.mixed_4e = InceptionBlock( self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64 ) self.mixed_4f = InceptionBlock( self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128 ) self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding( kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME" ) self.mixed_5b = InceptionBlock( self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128 ) self.mixed_5c = InceptionBlock( self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128 ) self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes) self.text_module = Sentence_Embedding(num_classes, token_to_word_path=dict_path) def _space_to_depth(self, input): """3D space to depth trick for TPU optimization. """ B, C, T, H, W = input.shape input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2) input = input.permute(0, 3, 5, 7, 1, 2, 4, 6) input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2) return input def forward(self, inputs): """Defines the S3DG base architecture.""" if self.space_to_depth: inputs = self._space_to_depth(inputs) net = self.conv1(inputs) if self.space_to_depth: # we need to replicate 'SAME' tensorflow padding net = net[:, :, 1:, 1:, 1:] net = self.maxpool_2a(net) net = self.conv_2b(net) net = self.conv_2c(net) if self.gating: net = self.gating(net) net = self.maxpool_3a(net) net = self.mixed_3b(net) net = self.mixed_3c(net) net = self.maxpool_4a(net) net = self.mixed_4b(net) net = self.mixed_4c(net) net = self.mixed_4d(net) net = self.mixed_4e(net) net = self.mixed_4f(net) net = self.maxpool_5a(net) net = self.mixed_5b(net) net = self.mixed_5c(net) net = th.mean(net, dim=[2, 3, 4]) return {'video_embedding': self.fc(net), 'mixed_5c': net}
mit
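Note on the record above: with space_to_depth enabled, conv1 takes 24 input channels because _space_to_depth packs each 2x2x2 neighborhood of the 3-channel input into the channel dimension. A standalone, shape-only sketch of that reshaping (mirroring the method above, shown separately just to make the bookkeeping explicit):

# Sketch of the 3D space-to-depth trick from S3D._space_to_depth.
import torch as th

def space_to_depth_3d(x: th.Tensor) -> th.Tensor:
    """(B, C, T, H, W) -> (B, 8*C, T/2, H/2, W/2); T, H and W must be even."""
    B, C, T, H, W = x.shape
    x = x.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
    x = x.permute(0, 3, 5, 7, 1, 2, 4, 6)
    return x.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)

video = th.randn(2, 3, 16, 224, 224)                           # toy RGB clip
assert space_to_depth_3d(video).shape == (2, 24, 8, 112, 112)  # 8 * 3 = 24 channels into conv1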
agartland/utils
hclusterplot.py
1
24750
import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.gridspec import GridSpec import palettable import pandas as pd import scipy.spatial.distance as distance import scipy.cluster.hierarchy as sch from sklearn.cluster.bicluster import SpectralBiclustering, SpectralCoclustering import numpy as np import itertools from corrplots import scatterfit __all__ = ['plotHCluster', 'plotHColCluster', 'plotCorrHeatmap', 'mapColors2Labels', 'computeDMat', 'computeHCluster', 'plotBicluster', 'labeledDendrogram', 'clusterOrder'] def clean_axis(ax): """Remove ticks, tick labels, and frame from axis""" ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) for sp in list(ax.spines.values()): sp.set_visible(False) ax.grid(False) ax.set_facecolor('white') def mapColors2Labels(labels, setStr='Set3', cmap=None, returnLookup=False): """Return pd.Series of colors based on labels""" if cmap is None: N = max(3, min(12, len(np.unique(labels)))) cmap = palettable.colorbrewer.get_map(setStr, 'Qualitative', N).mpl_colors cmapLookup = {k:col for k, col in zip(sorted(np.unique(labels)), itertools.cycle(cmap))} if returnLookup: return labels.map(cmapLookup.get), cmapLookup else: return labels.map(cmapLookup.get) def computeDMat(df, metric=None, minN=1, dfunc=None): if dfunc is None: if metric in ['spearman', 'pearson']: """Anti-correlations are also considered as high similarity and will cluster together""" """dmat = 1 - df.corr(method = metric, min_periods = minN).values dmat[np.isnan(dmat)] = 1 """ dmat = 1 - df.corr(method = metric, min_periods = minN).values**2 dmat[np.isnan(dmat)] = 1 elif metric in ['spearman-signed', 'pearson-signed']: """Anti-correlations are considered as dissimilar and will NOT cluster together""" dmat = (1 - df.corr(method = metric.replace('-signed', ''), min_periods = minN).values) / 2 dmat[np.isnan(dmat)] = 1 else: dmat = distance.squareform(distance.pdist(df.T, metric = metric)) else: ncols = df.shape[1] dmat = np.zeros((ncols, ncols)) for i in range(ncols): for j in range(ncols): """Assume its symetrical""" if i<=j: tmpdf = df.iloc[:, [i, j]] tmpdf = tmpdf.dropna() if tmpdf.shape[0] >= minN: d = dfunc(df.iloc[:, i], df.iloc[:, j]) else: d = np.nan dmat[i, j] = d dmat[j, i] = d assert dmat.shape[0] == dmat.shape[1] assert dmat.shape[0] == df.shape[1] return dmat def clusterOrder(df, axis=0, metric='correlation', method='complete'): if axis == 0: dvec = distance.pdist(df, metric=metric) else: dvec = distance.pdist(df.T, metric=metric) clusters = sch.linkage(dvec, method=method) den = sch.dendrogram(clusters, color_threshold=np.inf, no_plot=True) if axis == 0: order = df.index[den['leaves']].tolist() else: order = df.T.index[den['leaves']].tolist() return order def computeHCluster(dmat, method='complete'): """Compute dmat, clusters and dendrogram of df using the linkage method and distance metric given""" if dmat.shape[0] == dmat.shape[1]: if type(dmat) is pd.DataFrame: #compressedDmat = dmat.values[np.triu_indices_from(dmat.values)].ravel() compressedDmat = distance.squareform(dmat.values) else: #compressedDmat = dmat[np.triu_indices_from(dmat)].ravel() compressedDmat = distance.squareform(dmat) else: raise clusters = sch.linkage(compressedDmat, method=method) den = sch.dendrogram(clusters, color_threshold=np.inf, no_plot=True) return clusters, den def testData(rows=50,columns=20): data = np.random.multivariate_normal(rand(columns), rand(columns, columns), rows) df = pd.DataFrame(data, columns=[''.join([lett]*9) for lett in 
'ABCDEFGHIJKLMNOPQRST']) rowLabels = pd.Series(rand(rows).round(), index=df.index) columnLabels = pd.Series(rand(columns).round(), index=df.columns) return {'df':df,'row_labels':rowLabels,'col_labels':columnLabels} def addColorbar(fig,cb_ax,data_ax,label='Correlation'): """Colorbar""" cb = fig.colorbar(data_ax, cb_ax) # note that we could pass the norm explicitly with norm=my_norm cb.set_label(label) """Make colorbar labels smaller""" for t in cb.ax.yaxis.get_ticklabels(): t.set_fontsize('small') def plotCorrHeatmap(df=None, metric='pearson', rowInd=None, colInd=None, col_labels=None, titleStr=None, vRange=None, tickSz='large', cmap=None, dmat=None, cbLabel='Correlation', minN=1): """Plot a heatmap of a column-wise distance matrix defined by metric (can be 'spearman' as well) Can provide dmat as a pd.DataFrame instead of df. Optionally supply a column index colInd to reorder the columns to match a previous clustering Optionally, col_labels will define a color strip along the yaxis to show groups""" fig = plt.gcf() fig.clf() if dmat is None and df is None: print('Need to provide df or dmat') return elif df is None: rowLabels = dmat.index columnLabels = dmat.columns dmat = dmat.values elif dmat is None: dmat = computeDMat(df, metric, minN=minN) rowLabels = df.columns columnLabels = df.columns if cmap is None: cmap = palettable.colorbrewer.diverging.RdBu_11_r.mpl_colormap if colInd is None: colInd = np.arange(dmat.shape[1]) if rowInd is None: rowInd = colInd if col_labels is None: heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.78, top=0.85)[0, 0]) scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.87, bottom=0.05, right=0.93, top=0.85)[0, 0]) else: col_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.08, top=0.85)[0, 0]) heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.11, bottom=0.05, right=0.78, top=0.85)[0, 0]) scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.87, bottom=0.05, right=0.93, top=0.85)[0, 0]) if vRange is None: vmin, vmax = (-1, 1) #vmin = dmat.flatten().min() #vmax = dmat.flatten().max() else: vmin, vmax = vRange my_norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) """Column label colorbar but along the rows""" if not col_labels is None: col_cbSE = mapColors2Labels(col_labels) col_axi = col_cbAX.imshow([[x] for x in col_cbSE.iloc[rowInd].values], interpolation='nearest', aspect='auto', origin='lower') clean_axis(col_cbAX) """Heatmap plot""" axi = heatmapAX.imshow(dmat[rowInd,:][:, colInd], interpolation='nearest', aspect='auto', origin='lower', norm=my_norm, cmap=cmap) clean_axis(heatmapAX) """Column tick labels along the rows""" if tickSz is None: heatmapAX.set_yticks([]) heatmapAX.set_xticks([]) else: heatmapAX.set_yticks(np.arange(dmat.shape[1])) heatmapAX.yaxis.set_ticks_position('right') heatmapAX.set_yticklabels(rowLabels[colInd], fontsize=tickSz, fontname='Consolas') """Column tick labels""" heatmapAX.set_xticks(np.arange(dmat.shape[1])) heatmapAX.xaxis.set_ticks_position('top') xlabelsL = heatmapAX.set_xticklabels(columnLabels[colInd], fontsize=tickSz, rotation=90, fontname='Consolas') """Remove the tick lines""" for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): l.set_markersize(0) addColorbar(fig, scale_cbAX, axi, label=cbLabel) """Add title as xaxis label""" if not titleStr is None: heatmapAX.set_xlabel(titleStr, size='x-large') def plotHColCluster(df=None, col_dmat=None, method='complete', metric='euclidean', col_labels=None, titleStr=None, vRange=None, tickSz='medium', cmap=None, minN=1, K=None, 
labelCmap=None, noColorBar=False, interactive=False): """Perform hierarchical clustering on df columns and plot square heatmap of pairwise distances""" if col_dmat is None and df is None: print('Need to provide df or col_dmat') return elif df is None: columnLabels = col_dmat.columns col_dmat = col_dmat.values colorbarLabel = '' col_plot = col_dmat elif col_dmat is None: col_dmat = computeDMat(df, metric, minN=minN) columnLabels = df.columns if metric in ['spearman', 'pearson', 'spearman-signed', 'pearson-signed']: """If it's a correlation metric, plot Rho not the dmat""" colorbarLabel = 'Correlation coefficient' if metric in ['spearman-signed', 'pearson-signed']: col_plot = df.corr(method=metric.replace('-signed', ''), min_periods=minN).values else: col_plot = df.corr(method=metric, min_periods=minN).values else: colorbarLabel = '' col_plot = col_dmat else: col_plot = col_dmat columnLabels = df.columns colorbarLabel = '' nCols = col_dmat.shape[1] if cmap is None: if metric in ['spearman', 'pearson', 'spearman-signed', 'pearson-signed']: cmap = palettable.colorbrewer.diverging.RdBu_11_r.mpl_colormap else: cmap = palettable.colorbrewer.sequential.YlOrRd_9.mpl_colormap col_clusters, col_den = computeHCluster(col_dmat, method) if col_labels is None and not K is None: col_labels = pd.Series(sch.fcluster(col_clusters, K, criterion='maxclust'), index=columnLabels) if isinstance(col_plot, pd.DataFrame): col_plot = col_plot.values if vRange is None: if metric in ['spearman', 'pearson', 'spearman-signed', 'pearson-signed']: vmin, vmax = (-1, 1) else: vmin = col_plot.min() vmax = col_plot.max() else: vmin, vmax = vRange my_norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) fig = plt.gcf() fig.clf() #heatmapGS = gridspec.GridSpec(1,4,wspace=0.0,width_ratios=[0.25,0.01,2,0.15]) if col_labels is None and K is None: col_denAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.15, top=0.85)[0, 0]) heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.16, bottom=0.05, right=0.75, top=0.85)[0, 0]) if not noColorBar: scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.94, bottom=0.05, right=0.97, top=0.85)[0, 0]) else: """TODO: work on row_cbAX so that I can have the data labels on the top and left""" col_denAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.15, top=0.85)[0, 0]) col_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.16, bottom=0.05, right=0.19, top=0.85)[0, 0]) #row_cbAX = fig.add_subplot(GridSpec(1,1,left=0.2,bottom=0.83,right=0.75,top=0.86)[0,0]) heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.2, bottom=0.05, right=0.75, top=0.85)[0, 0]) if not noColorBar: scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.94, bottom=0.05, right=0.97, top=0.85)[0, 0]) """Column dendrogaram but along the rows""" plt.sca(col_denAX) col_denD = sch.dendrogram(col_clusters, color_threshold=np.inf, orientation='left') colInd = col_denD['leaves'] clean_axis(col_denAX) """Column label colorbar but along the rows""" if not col_labels is None: col_cbSE = mapColors2Labels(col_labels, cmap=labelCmap) col_axi = col_cbAX.imshow([[x] for x in col_cbSE.iloc[colInd].values], interpolation='nearest', aspect='auto', origin='lower') clean_axis(col_cbAX) """Heatmap plot""" axi = heatmapAX.imshow(col_plot[colInd,:][:, colInd], interpolation='nearest', aspect='auto', origin='lower', norm=my_norm, cmap=cmap) clean_axis(heatmapAX) """Column tick labels along the rows""" if tickSz is None: heatmapAX.set_yticks(()) heatmapAX.set_xticks(()) else: heatmapAX.set_yticks(np.arange(nCols)) 
heatmapAX.yaxis.set_ticks_position('right') heatmapAX.set_yticklabels(columnLabels[colInd], fontsize=tickSz, fontname='Consolas') """Column tick labels""" heatmapAX.set_xticks(np.arange(nCols)) heatmapAX.xaxis.set_ticks_position('top') xlabelsL = heatmapAX.set_xticklabels(columnLabels[colInd], fontsize=tickSz, rotation=90, fontname='Consolas') """Remove the tick lines""" for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): l.set_markersize(0) if not noColorBar: addColorbar(fig, scale_cbAX, axi, label=colorbarLabel) """Add title as xaxis label""" if not titleStr is None: heatmapAX.set_xlabel(titleStr, size='x-large') if interactive and not df is None: scatterFig = plt.figure(fig.number + 100) ps = PairScatter(df.iloc[:, colInd], heatmapAX, scatterFig.add_subplot(111), method=metric) return colInd, ps return colInd def plot1DHClust(distDf, hclusters, labels=None, titleStr=None, vRange=None, tickSz='small', cmap=None, colorbarLabel=None, labelCmap=None, noColorBar=False): """Plot hierarchical clustering results (no computation) I'm not even sure this is useful...""" if cmap is None: cmap = palettable.colorbrewer.sequential.YlOrRd_9.mpl_colormap fig = plt.gcf() fig.clf() nCols = distDf.shape[0] if labels is None: col_denAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.15, top=0.85)[0, 0]) heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.16, bottom=0.05, right=0.78, top=0.85)[0, 0]) if not noColorBar: scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.87, bottom=0.05, right=0.93, top=0.85)[0, 0]) else: col_denAX = fig.add_subplot(GridSpec(1, 1, left=0.05, bottom=0.05, right=0.15, top=0.85)[0, 0]) col_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.16, bottom=0.05, right=0.19, top=0.85)[0, 0]) heatmapAX = fig.add_subplot(GridSpec(1, 1, left=0.2, bottom=0.05, right=0.78, top=0.85)[0, 0]) if not noColorBar: scale_cbAX = fig.add_subplot(GridSpec(1, 1, left=0.87, bottom=0.05, right=0.93, top=0.85)[0, 0]) if vRange is None: vmin = distDf.values.min() vmax = distDf.vlaues.max() else: vmin, vmax = vRange my_norm = mpl.colors.Normalize(vmin = vmin, vmax = vmax) """Column dendrogaram but along the rows""" plt.axes(col_denAX) colInd = hclusters['leaves'] clean_axis(col_denAX) imshowOptions = dict(interpolation = 'nearest', aspect = 'auto', origin = 'lower') """Column label colorbar but along the rows""" if not labels is None: col_cbSE = mapColors2Labels(labels, cmap = labelCmap) col_axi = col_cbAX.imshow([[x] for x in col_cbSE.iloc[colInd].values], **imshowOptions) clean_axis(col_cbAX) """Heatmap plot""" axi = heatmapAX.imshow(distDf.values[colInd,:][:, colInd], norm = my_norm, cmap = cmap, **imshowOptions) clean_axis(heatmapAX) """Column tick labels along the rows""" if tickSz is None: heatmapAX.set_yticks(()) heatmapAX.set_xticks(()) else: heatmapAX.set_yticks(np.arange(nCols)) heatmapAX.yaxis.set_ticks_position('right') heatmapAX.set_yticklabels(distDf.columns[colInd], fontsize=tickSz, fontname='Consolas') """Column tick labels""" heatmapAX.set_xticks(np.arange(nCols)) heatmapAX.xaxis.set_ticks_position('top') xlabelsL = heatmapAX.set_xticklabels(distDf.columns[colInd], fontsize=tickSz, rotation=90, fontname='Consolas') """Remove the tick lines""" for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): l.set_markersize(0) if not noColorBar: addColorbar(fig, scale_cbAX, axi, label=colorbarLabel) """Add title as xaxis label""" if not titleStr is None: heatmapAX.set_xlabel(titleStr, size='x-large') def plotHCluster(df, method='complete', metric='euclidean', 
clusterBool=[True, True],row_labels=None, col_labels=None, vRange=None,titleStr=None,xTickSz='small',yTickSz='small',cmap=None,minN=1): """Perform hierarchical clustering on df data columns (and rows) and plot results as dendrograms and heatmap. df - pd.DataFrame(), will use index and column labels as tick labels method and metric - parameters passed to scipy.spatial.distance.pdist and scipy.cluster.hierarchy.linkage row_labels - pd.Series with index same as df with values indicating groups (optional) col_labels - pd.Series with index same as columns in df with values indicating groups (optional) vMinMax - optional scaling, [vmin, vmax] can be derived from data clusterBool - [row, col] bool indicating whether to cluster along that axis """ if cmap is None: cmap = palettable.colorbrewer.diverging.RdBu_11_r.mpl_colormap if vRange is None: vmin = df.min().min() vmax = df.max().max() else: vmin, vmax = vRange my_norm = mpl.colors.Normalize(vmin, vmax) fig = plt.gcf() fig.clf() if clusterBool[1]: heatmapGS = gridspec.GridSpec(3, 3, wspace=0.0, hspace=0.0, width_ratios=[0.15, 0.02, 1], height_ratios=[0.15, 0.02, 1]) else: heatmapGS = gridspec.GridSpec(3, 3, wspace=0.0, hspace=0.0, width_ratios=[0.15, 0.02, 1], height_ratios=[0.001, 0.02, 1]) if clusterBool[0]: row_dmat = computeDMat(df.T, metric, minN=minN) row_clusters, row_den = computeHCluster(row_dmat, method) """Dendrogarams""" row_denAX = fig.add_subplot(heatmapGS[2, 0]) row_denD = sch.dendrogram(row_clusters, color_threshold=np.inf, orientation='left') clean_axis(row_denAX) rowInd = row_denD['leaves'] else: rowInd = np.arange(df.shape[0]) """Row colorbar""" if not row_labels is None: """NOTE: row_labels will not be index aware and must be in identical order as data""" row_cbSE = mapColors2Labels(row_labels, 'Set1') row_cbAX = fig.add_subplot(heatmapGS[2, 1]) row_axi = row_cbAX.imshow([[x] for x in row_cbSE.iloc[rowInd].values], interpolation='nearest', aspect='auto', origin='lower') clean_axis(row_cbAX) if clusterBool[1]: col_dmat = computeDMat(df, metric, minN=minN) col_clusters, col_den = computeHCluster(col_dmat, method) """Dendrogarams""" col_denAX = fig.add_subplot(heatmapGS[0, 2]) col_denD = sch.dendrogram(col_clusters, color_threshold=np.inf) clean_axis(col_denAX) colInd = col_denD['leaves'] else: colInd = np.arange(df.shape[1]) """Column colorbar""" if not col_labels is None: col_cbSE = mapColors2Labels(col_labels) col_cbAX = fig.add_subplot(heatmapGS[1, 2]) col_axi = col_cbAX.imshow([list(col_cbSE.iloc[colInd])], interpolation='nearest', aspect='auto', origin='lower') clean_axis(col_cbAX) """Heatmap plot""" heatmapAX = fig.add_subplot(heatmapGS[2, 2]) axi = heatmapAX.imshow(df.iloc[rowInd, colInd], interpolation='nearest', aspect='auto', origin='lower', norm=my_norm, cmap=cmap) clean_axis(heatmapAX) heatmapAX.grid(False) """Row tick labels""" heatmapAX.set_yticks(np.arange(df.shape[0])) ylabelsL = None if not yTickSz is None: heatmapAX.yaxis.set_ticks_position('right') ylabelsL = heatmapAX.set_yticklabels(df.index[rowInd], fontsize=yTickSz, fontname='Consolas') else: ylabelsL = heatmapAX.set_yticklabels([]) """Add title as xaxis label""" if not titleStr is None: heatmapAX.set_xlabel(titleStr, size='x-large') """Column tick labels""" heatmapAX.set_xticks(np.arange(df.shape[1])) xlabelsL = None if not xTickSz is None: xlabelsL = heatmapAX.set_xticklabels(df.columns[colInd], fontsize=xTickSz, rotation=90, fontname='Consolas') """Remove the tick lines""" for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): 
l.set_markersize(0) """Colorbar""" scaleGS = gridspec.GridSpec(10, 15, wspace=0., hspace=0.) scale_cbAX = fig.add_subplot(scaleGS[:2, 0]) # colorbar for scale in upper left corner cb = fig.colorbar(axi, scale_cbAX) # note that we could pass the norm explicitly with norm=my_norm cb.set_label('Measurements') cb.ax.yaxis.set_ticks_position('left') # move ticks to left side of colorbar to avoid problems with tight_layout cb.ax.yaxis.set_label_position('left') # move label to left side of colorbar to avoid problems with tight_layout #cb.outline.set_linewidth(0) """Make colorbar labels smaller""" for t in cb.ax.yaxis.get_ticklabels(): t.set_fontsize('small') scaleGS.tight_layout(fig, h_pad=0.0, w_pad=0.0) heatmapGS.tight_layout(fig, h_pad=0.1, w_pad=0.5) handles = dict(cb=cb, heatmapAX=heatmapAX, fig=fig, xlabelsL=xlabelsL, ylabelsL=ylabelsL, heatmapGS=heatmapGS) return rowInd, colInd, handles def plotBicluster(df, n_clusters, col_labels=None): model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(df) fitDf = df.iloc[np.argsort(model.row_labels_),:] fitDf = fitDf.iloc[:, np.argsort(model.column_labels_)] plotCorrHeatmap(dmat=fitDf, col_labels=col_labels) return fitDf def normalizeAxis(df,axis=0,useMedian=False): """Normalize along the specified axis by subtracting the mean and dividing by the stdev. Uses df functions that ignore NAs Parameters ---------- df : pd.DataFrame axis : int Normalization along this axis. (e.g. df.mean(axis=axis)) Returns ------- out : pd.DataFrame""" tmp = df.copy() retile = ones(len(df.shape)) retile[axis] = df.shape[axis] if useMedian: tmp = tmp - tile(tmp.median(axis=axis).values, retile) else: tmp = tmp - tile(tmp.mean(axis=axis).values, retile) tmp = tmp / tile(tmp.std(axis=axis).values, retile) return tmp class PairScatter: """Instantiate this class to interactively pair a heatmap and a pairwise scatterfit plot in a new figure window.""" def __init__(self, df, heatmapAx, scatterAx, method): self.scatterAx = scatterAx self.heatmapAx = heatmapAx self.df = df self.method = method self.cid = heatmapAx.figure.canvas.mpl_connect('button_press_event', self) def __call__(self, event): if event.inaxes != self.heatmapAx: return else: xind = int(np.floor(event.xdata + 0.5)) yind = int(np.floor(event.ydata + 0.5)) plt.sca(self.scatterAx) plt.cla() scatterfit(self.df.iloc[:, xind], self.df.iloc[:, yind], method = self.method, plotLine = True) self.scatterAx.figure.show() def labeledDendrogram(dmat, labels, method='complete', cmap=None): """Perform hierarchical clustering on df columns and plot square heatmap of pairwise distances""" """TODO: add tick labels, with sparsity option""" Z = sch.linkage(dmat, method=method) den = sch.dendrogram(Z, color_threshold=np.inf, no_plot=True) figh = plt.gcf() figh.clf() denAX = figh.add_axes([0.32, 0.05, 0.6, 0.9]) cbAX = figh.add_axes([0.25, 0.05, 0.05, 0.9]) plt.sca(denAX) denD = sch.dendrogram(Z, color_threshold=np.inf, orientation='left') ind = denD['leaves'] clean_axis(denAX) cbSE, lookup = mapColors2Labels(labels, cmap=cmap, returnLookup=True) axi = cbAX.imshow([[x] for x in cbSE.iloc[ind].values], interpolation='nearest', aspect='auto', origin='lower') clean_axis(cbAX) colorLegend(list(lookup.values()), list(lookup.keys()), axh=denAX)
mit
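Note on the record above: computeDMat supports two correlation-based distances. The unsigned 'pearson'/'spearman' variant uses 1 - rho**2, so strongly anti-correlated columns end up close together, while the '-signed' variant uses (1 - rho) / 2, so they do not. A short illustration on hypothetical toy data (pandas only, not taken from the file):

# Two correlation-to-distance conventions, as in computeDMat above.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
x = rng.normal(size=200)
df = pd.DataFrame({'a': x, 'b': -x + 0.1 * rng.normal(size=200)})  # b is anti-correlated with a

rho = df.corr(method='pearson')
unsigned = 1 - rho ** 2   # 'pearson':        a and b look nearly identical (distance near 0)
signed = (1 - rho) / 2    # 'pearson-signed': a and b look maximally distant (distance near 1)

print(unsigned.loc['a', 'b'], signed.loc['a', 'b'])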
tlby/mxnet
tests/nightly/estimator/test_sentiment_rnn.py
5
11113
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Gluon Text Sentiment Classification Example using RNN/CNN Example modified from below link: https://github.com/d2l-ai/d2l-en/blob/master/chapter_natural-language-processing/sentiment-analysis-rnn.md https://github.com/d2l-ai/d2l-en/blob/master/chapter_natural-language-processing/sentiment-analysis-cnn.md""" import collections import os import random import sys import tarfile import mxnet as mx from mxnet import nd, gluon from mxnet.contrib import text from mxnet.gluon import nn, rnn from mxnet.gluon.contrib.estimator import estimator import pytest class TextCNN(nn.Block): def __init__(self, vocab, embed_size, kernel_sizes, num_channels, **kwargs): super(TextCNN, self).__init__(**kwargs) self.embedding = nn.Embedding(len(vocab), embed_size) # The embedding layer does not participate in training self.constant_embedding = nn.Embedding(len(vocab), embed_size) self.dropout = nn.Dropout(0.5) self.decoder = nn.Dense(2) # The max-over-time pooling layer has no weight, so it can share an # instance self.pool = nn.GlobalMaxPool1D() # Create multiple one-dimensional convolutional layers self.convs = nn.Sequential() for c, k in zip(num_channels, kernel_sizes): self.convs.add(nn.Conv1D(c, k, activation='relu')) def forward(self, inputs): # Concatenate the output of two embedding layers with shape of # (batch size, number of words, word vector dimension) by word vector embeddings = mx.np.concatenate( [self.embedding(inputs), self.constant_embedding(inputs)], axis=2) # According to the input format required by Conv1D, the word vector # dimension, that is, the channel dimension of the one-dimensional # convolutional layer, is transformed into the previous dimension embeddings = embeddings.transpose((0, 2, 1)) # For each one-dimensional convolutional layer, after max-over-time # pooling, an NDArray with the shape of (batch size, channel size, 1) # can be obtained. Use the flatten function to remove the last # dimension and then concatenate on the channel dimension encoding = mx.np.concatenate([mx.npx.batch_flatten( self.pool(conv(embeddings))) for conv in self.convs], axis=1) # After applying the dropout method, use a fully connected layer to # obtain the output outputs = self.decoder(self.dropout(encoding)) return outputs class BiRNN(nn.Block): def __init__(self, vocab, embed_size, num_hiddens, num_layers, **kwargs): super(BiRNN, self).__init__(**kwargs) self.embedding = nn.Embedding(len(vocab), embed_size) # Set Bidirectional to True to get a bidirectional recurrent neural # network self.encoder = rnn.LSTM(num_hiddens, num_layers=num_layers, bidirectional=True, input_size=embed_size) self.decoder = nn.Dense(2) def forward(self, inputs): # The shape of inputs is (batch size, number of words). 
Because LSTM # needs to use sequence as the first dimension, the input is # transformed and the word feature is then extracted. The output shape # is (number of words, batch size, word vector dimension). embeddings = self.embedding(inputs.T) # The shape of states is (number of words, batch size, 2 * number of # hidden units). states = self.encoder(embeddings) # Concatenate the hidden states of the initial time step and final # time step to use as the input of the fully connected layer. Its # shape is (batch size, 4 * number of hidden units) encoding = mx.np.concatenate([states[0], states[-1]], axis=1) outputs = self.decoder(encoding) return outputs def download_imdb(data_dir='/tmp/data'): ''' Download and extract the IMDB dataset ''' # Large Movie Review Dataset from http://ai.stanford.edu/~amaas/data/sentiment/ # Note this dataset is copyright to Andrew Maas and Stanford AI Lab # @InProceedings{maas-EtAl:2011:ACL-HLT2011, # author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher}, # title = {Learning Word Vectors for Sentiment Analysis}, # booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies}, # month = {June}, # year = {2011}, # address = {Portland, Oregon, USA}, # publisher = {Association for Computational Linguistics}, # pages = {142--150}, # url = {http://www.aclweb.org/anthology/P11-1015} # } url = ('https://aws-ml-platform-datasets.s3.amazonaws.com/imdb/aclImdb_v1.tar.gz') sha1 = '01ada507287d82875905620988597833ad4e0903' if not os.path.exists(data_dir): os.makedirs(data_dir) file_path = os.path.join(data_dir, 'aclImdb_v1.tar.gz') if not os.path.isfile(file_path): file_path = gluon.utils.download(url, data_dir, sha1_hash=sha1) with tarfile.open(file_path, 'r') as f: f.extractall(data_dir) def read_imdb(folder='train'): ''' Read the IMDB dataset ''' data = [] for label in ['pos', 'neg']: folder_name = os.path.join('/tmp/data/aclImdb/', folder, label) for file in os.listdir(folder_name): with open(os.path.join(folder_name, file), 'rb') as f: review = f.read().decode('utf-8').replace('\n', '').lower() data.append([review, 1 if label == 'pos' else 0]) random.shuffle(data) return data def get_tokenized_imdb(data): ''' Tokenized the words ''' def tokenizer(text): return [tok.lower() for tok in text.split(' ')] return [tokenizer(review) for review, _ in data] def get_vocab_imdb(data): ''' Get the indexed tokens ''' tokenized_data = get_tokenized_imdb(data) counter = collections.Counter([tk for st in tokenized_data for tk in st]) return text.vocab.Vocabulary(counter, min_freq=5) def preprocess_imdb(data, vocab): ''' Make the length of each comment 500 by truncating or adding 0s ''' max_l = 500 def pad(x): return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x)) tokenized_data = get_tokenized_imdb(data) features = mx.np.array([pad(vocab.to_indices(x)) for x in tokenized_data]) labels = mx.np.array([score for _, score in data]) return features, labels def run(net, train_dataloader, test_dataloader, num_epochs, ctx, lr): ''' Train a test sentiment model ''' # Define trainer trainer = mx.gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) # Define loss and evaluation metrics loss = gluon.loss.SoftmaxCrossEntropyLoss() metrics = mx.gluon.metric.CompositeEvalMetric() acc = mx.gluon.metric.Accuracy() nested_metrics = mx.gluon.metric.CompositeEvalMetric() metrics.add([acc, mx.gluon.metric.Loss()]) nested_metrics.add([metrics, 
mx.gluon.metric.Accuracy()]) # Define estimator est = estimator.Estimator(net=net, loss=loss, train_metrics=nested_metrics, trainer=trainer, context=ctx) # Begin training est.fit(train_data=train_dataloader, val_data=test_dataloader, epochs=num_epochs) return acc def test_estimator_cpu(): ''' Test estimator by doing one pass over each model with synthetic data ''' models = ['TextCNN', 'BiRNN'] ctx = mx.cpu() batch_size = 64 embed_size = 100 lr = 1 num_epochs = 1 train_data = mx.np.random.randint(low=0, high=100, size=(2 * batch_size, 500)) train_label = mx.np.random.randint(low=0, high=2, size=(2 * batch_size,)) val_data = mx.np.random.randint(low=0, high=100, size=(batch_size, 500)) val_label = mx.np.random.randint(low=0, high=2, size=(batch_size,)) train_dataloader = gluon.data.DataLoader(dataset=gluon.data.ArrayDataset(train_data, train_label), batch_size=batch_size, shuffle=True) val_dataloader = gluon.data.DataLoader(dataset=gluon.data.ArrayDataset(val_data, val_label), batch_size=batch_size) vocab_list = mx.np.zeros(shape=(100,)) # Get the model for model in models: if model == 'TextCNN': kernel_sizes, nums_channels = [3, 4, 5], [100, 100, 100] net = TextCNN(vocab_list, embed_size, kernel_sizes, nums_channels) else: num_hiddens, num_layers = 100, 2 net = BiRNN(vocab_list, embed_size, num_hiddens, num_layers) net.initialize(mx.init.Xavier(), ctx=ctx) run(net, train_dataloader, val_dataloader, num_epochs=num_epochs, ctx=ctx, lr=lr) @pytest.mark.seed(7) # using fixed seed to reduce flakiness in accuracy assertion @pytest.mark.skipif(mx.device.num_gpus() < 1, reason="skip if no GPU") def test_estimator_gpu(): ''' Test estimator by training Bidirectional RNN for 5 epochs on the IMDB dataset and verify accuracy ''' ctx = mx.gpu(0) batch_size = 64 num_epochs = 5 embed_size = 100 lr = 0.01 # data download_imdb() train_data, test_data = read_imdb('train'), read_imdb('test') vocab = get_vocab_imdb(train_data) train_set = gluon.data.ArrayDataset(*preprocess_imdb(train_data, vocab)) test_set = gluon.data.ArrayDataset(*preprocess_imdb(test_data, vocab)) train_dataloader = gluon.data.DataLoader(train_set, batch_size, shuffle=True) test_dataloader = gluon.data.DataLoader(test_set, batch_size) # Model num_hiddens, num_layers = 100, 2 net = BiRNN(vocab, embed_size, num_hiddens, num_layers) net.initialize(mx.init.Xavier(), ctx=ctx) net.hybridize() glove_embedding = text.embedding.create( 'glove', pretrained_file_name='glove.6B.100d.txt', vocabulary=vocab) net.embedding.weight.set_data(glove_embedding.idx_to_vec) net.embedding.setattr('grad_req', 'null') acc = run(net, train_dataloader, test_dataloader, num_epochs=num_epochs, ctx=ctx, lr=lr) assert acc.get()[1] > 0.70
apache-2.0
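Note on the record above: preprocess_imdb forces every review to exactly max_l = 500 token indices by truncating long reviews and right-padding short ones with 0. A standalone sketch of just that step (plain Python, no MXNet required):

# The truncate-or-pad step from preprocess_imdb above, in isolation.
def pad(x, max_l=500, pad_id=0):
    """Truncate to max_l indices or right-pad with pad_id."""
    return x[:max_l] if len(x) > max_l else x + [pad_id] * (max_l - len(x))

assert len(pad(list(range(3)))) == 500
assert len(pad(list(range(900)))) == 500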
PaddlePaddle/models
tutorials/mobilenetv3_prod/Step1-5/torch2paddle.py
1
1070
import numpy as np
import torch
import paddle


def torch2paddle():
    torch_path = "./data/mobilenet_v3_small-047dcff4.pth"
    paddle_path = "./data/mv3_small_paddle.pdparams"
    torch_state_dict = torch.load(torch_path)
    fc_names = ["classifier"]
    paddle_state_dict = {}
    for k in torch_state_dict:
        if "num_batches_tracked" in k:
            continue
        v = torch_state_dict[k].detach().cpu().numpy()
        flag = [i in k for i in fc_names]
        if any(flag) and "weight" in k:  # ignore bias
            new_shape = [1, 0] + list(range(2, v.ndim))
            print(
                f"name: {k}, ori shape: {v.shape}, new shape: {v.transpose(new_shape).shape}"
            )
            v = v.transpose(new_shape)
        k = k.replace("running_var", "_variance")
        k = k.replace("running_mean", "_mean")
        # if k not in model_state_dict:
        if False:
            print(k)
        else:
            paddle_state_dict[k] = v
    paddle.save(paddle_state_dict, paddle_path)


if __name__ == "__main__":
    torch2paddle()
apache-2.0
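Note on the record above: the transpose applied to the classifier weights reflects a layout difference between the two frameworks; torch.nn.Linear stores its weight as (out_features, in_features), whereas paddle.nn.Linear expects (in_features, out_features) (stated from general knowledge of the two APIs, not from this repo). A small sketch of the axis swap with hypothetical dimensions:

# Why fully connected weights are transposed during conversion.
import numpy as np

w_torch = np.zeros((1000, 1024))                   # (out_features, in_features), PyTorch layout
new_shape = [1, 0] + list(range(2, w_torch.ndim))  # swap the first two axes, as in torch2paddle
w_paddle = w_torch.transpose(new_shape)            # (in_features, out_features), Paddle layout
assert w_paddle.shape == (1024, 1000)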
wilsonkichoi/zipline
zipline/utils/events.py
3
21726
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABCMeta, abstractmethod from collections import namedtuple import six import datetime import pandas as pd import pytz from .context_tricks import nop_context __all__ = [ 'EventManager', 'Event', 'EventRule', 'StatelessRule', 'ComposedRule', 'Always', 'Never', 'AfterOpen', 'BeforeClose', 'NotHalfDay', 'NthTradingDayOfWeek', 'NDaysBeforeLastTradingDayOfWeek', 'NthTradingDayOfMonth', 'NDaysBeforeLastTradingDayOfMonth', 'StatefulRule', 'OncePerDay', # Factory API 'date_rules', 'time_rules', 'make_eventrule', ] MAX_MONTH_RANGE = 26 MAX_WEEK_RANGE = 5 def naive_to_utc(ts): """ Converts a UTC tz-naive timestamp to a tz-aware timestamp. """ # Drop the nanoseconds field. warn=False suppresses the warning # that we are losing the nanoseconds; however, this is intended. return pd.Timestamp(ts.to_pydatetime(warn=False), tz='UTC') def ensure_utc(time, tz='UTC'): """ Normalize a time. If the time is tz-naive, assume it is UTC. """ if not time.tzinfo: time = time.replace(tzinfo=pytz.timezone(tz)) return time.replace(tzinfo=pytz.utc) def _coerce_datetime(maybe_dt): if isinstance(maybe_dt, datetime.datetime): return maybe_dt elif isinstance(maybe_dt, datetime.date): return datetime.datetime( year=maybe_dt.year, month=maybe_dt.month, day=maybe_dt.day, tzinfo=pytz.utc, ) elif isinstance(maybe_dt, (tuple, list)) and len(maybe_dt) == 3: year, month, day = maybe_dt return datetime.datetime( year=year, month=month, day=day, tzinfo=pytz.utc, ) else: raise TypeError('Cannot coerce %s into a datetime.datetime' % type(maybe_dt).__name__) def _out_of_range_error(a, b=None, var='offset'): start = 0 if b is None: end = a - 1 else: start = a end = b - 1 return ValueError( '{var} must be in between {start} and {end} inclusive'.format( var=var, start=start, end=end, ) ) def _td_check(td): seconds = td.total_seconds() # 23400 seconds is 6 hours and 30 minutes. if 60 <= seconds <= 23400: return td else: raise ValueError('offset must be in between 1 minute and 6 hours and' ' 30 minutes inclusive') def _build_offset(offset, kwargs, default): """ Builds the offset argument for event rules. """ if offset is None: if not kwargs: return default # use the default. else: return _td_check(datetime.timedelta(**kwargs)) elif kwargs: raise ValueError('Cannot pass kwargs and an offset') elif isinstance(offset, datetime.timedelta): return _td_check(offset) else: raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") def _build_date(date, kwargs): """ Builds the date argument for event rules. """ if date is None: if not kwargs: raise ValueError('Must pass a date or kwargs') else: return datetime.date(**kwargs) elif kwargs: raise ValueError('Cannot pass kwargs and a date') else: return date def _build_time(time, kwargs): """ Builds the time argument for event rules. 
""" tz = kwargs.pop('tz', 'UTC') if time: if kwargs: raise ValueError('Cannot pass kwargs and a time') else: return ensure_utc(time, tz) elif not kwargs: raise ValueError('Must pass a time or kwargs') else: return datetime.time(**kwargs) class EventManager(object): """Manages a list of Event objects. This manages the logic for checking the rules and dispatching to the handle_data function of the Events. Parameters ---------- create_context : (BarData) -> context manager, optional An optional callback to produce a context manager to wrap the calls to handle_data. This will be passed the current BarData. """ def __init__(self, create_context=None): self._events = [] self._create_context = ( create_context if create_context is not None else lambda *_: nop_context ) def add_event(self, event, prepend=False): """ Adds an event to the manager. """ if prepend: self._events.insert(0, event) else: self._events.append(event) def handle_data(self, context, data, dt): with self._create_context(data): for event in self._events: event.handle_data( context, data, dt, context.trading_environment, ) class Event(namedtuple('Event', ['rule', 'callback'])): """ An event is a pairing of an EventRule and a callable that will be invoked with the current algorithm context, data, and datetime only when the rule is triggered. """ def __new__(cls, rule=None, callback=None): callback = callback or (lambda *args, **kwargs: None) return super(cls, cls).__new__(cls, rule=rule, callback=callback) def handle_data(self, context, data, dt, env): """ Calls the callable only when the rule is triggered. """ if self.rule.should_trigger(dt, env): self.callback(context, data) class EventRule(six.with_metaclass(ABCMeta)): @abstractmethod def should_trigger(self, dt, env): """ Checks if the rule should trigger with its current state. This method should be pure and NOT mutate any state on the object. """ raise NotImplementedError('should_trigger') class StatelessRule(EventRule): """ A stateless rule has no observable side effects. This is reentrant and will always give the same result for the same datetime. Because these are pure, they can be composed to create new rules. """ def and_(self, rule): """ Logical and of two rules, triggers only when both rules trigger. This follows the short circuiting rules for normal and. """ return ComposedRule(self, rule, ComposedRule.lazy_and) __and__ = and_ class ComposedRule(StatelessRule): """ A rule that composes the results of two rules with some composing function. The composing function should be a binary function that accepts the results first(dt) and second(dt) as positional arguments. For example, operator.and_. If lazy=True, then the lazy composer is used instead. The lazy composer expects a function that takes the two should_trigger functions and the datetime. This is useful of you don't always want to call should_trigger for one of the rules. For example, this is used to implement the & and | operators so that they will have the same short circuit logic that is expected. """ def __init__(self, first, second, composer): if not (isinstance(first, StatelessRule) and isinstance(second, StatelessRule)): raise ValueError('Only two StatelessRules can be composed') self.first = first self.second = second self.composer = composer def should_trigger(self, dt, env): """ Composes the two rules with a lazy composer. 
""" return self.composer( self.first.should_trigger, self.second.should_trigger, dt, env ) @staticmethod def lazy_and(first_should_trigger, second_should_trigger, dt, env): """ Lazily ands the two rules. This will NOT call the should_trigger of the second rule if the first one returns False. """ return first_should_trigger(dt, env) and second_should_trigger(dt, env) class Always(StatelessRule): """ A rule that always triggers. """ @staticmethod def always_trigger(dt, env): """ A should_trigger implementation that will always trigger. """ return True should_trigger = always_trigger class Never(StatelessRule): """ A rule that never triggers. """ @staticmethod def never_trigger(dt, env): """ A should_trigger implementation that will never trigger. """ return False should_trigger = never_trigger class AfterOpen(StatelessRule): """ A rule that triggers for some offset after the market opens. Example that triggers after 30 minutes of the market opening: >>> AfterOpen(minutes=30) """ def __init__(self, offset=None, **kwargs): self.offset = _build_offset( offset, kwargs, datetime.timedelta(minutes=1), # Defaults to the first minute. ) self._period_start = None self._period_end = None self._period_close = None self._one_minute = datetime.timedelta(minutes=1) def calculate_dates(self, dt, env): # given a dt, find that day's open and period end (open + offset) self._period_start, self._period_close = env.get_open_and_close(dt) self._period_end = \ self._period_start + self.offset - self._one_minute def should_trigger(self, dt, env): # There are two reasons why we might want to recalculate the dates. # One is the first time we ever call should_trigger, when # self._period_start is none. The second is when we're on a new day, # and need to recalculate the dates. For performance reasons, we rely # on the fact that our clock only ever ticks forward, since it's # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means # that we will NOT correctly recognize a new date if we go backwards # in time(which should never happen in a simulation, or in a live # trading environment) if ( self._period_start is None or self._period_close <= dt ): self.calculate_dates(dt, env) return dt == self._period_end class BeforeClose(StatelessRule): """ A rule that triggers for some offset time before the market closes. Example that triggers for the last 30 minutes every day: >>> BeforeClose(minutes=30) """ def __init__(self, offset=None, **kwargs): self.offset = _build_offset( offset, kwargs, datetime.timedelta(minutes=1), # Defaults to the last minute. ) self._period_start = None self._period_end = None self._one_minute = datetime.timedelta(minutes=1) def calculate_dates(self, dt, env): # given a dt, find that day's close and period start (close - offset) self._period_end = env.get_open_and_close(dt)[1] self._period_start = \ self._period_end - self.offset self._period_close = self._period_end def should_trigger(self, dt, env): # There are two reasons why we might want to recalculate the dates. # One is the first time we ever call should_trigger, when # self._period_start is none. The second is when we're on a new day, # and need to recalculate the dates. For performance reasons, we rely # on the fact that our clock only ever ticks forward, since it's # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). 
This means # that we will NOT correctly recognize a new date if we go backwards # in time(which should never happen in a simulation, or in a live # trading environment) if ( self._period_start is None or self._period_close <= dt ): self.calculate_dates(dt, env) return self._period_start == dt class NotHalfDay(StatelessRule): """ A rule that only triggers when it is not a half day. """ def should_trigger(self, dt, env): return dt.date() not in env.early_closes class TradingDayOfWeekRule(six.with_metaclass(ABCMeta, StatelessRule)): def __init__(self, n=0): if not 0 <= abs(n) < MAX_WEEK_RANGE: raise _out_of_range_error(MAX_WEEK_RANGE) self.td_delta = n self.next_date_start = None self.next_date_end = None self.next_midnight_timestamp = None @abstractmethod def date_func(self, dt, env): raise NotImplementedError def calculate_start_and_end(self, dt, env): next_trading_day = _coerce_datetime( env.add_trading_days( self.td_delta, self.date_func(dt, env), ) ) # If after applying the offset to the start/end day of the week, we get # day in a different week, skip this week and go on to the next while next_trading_day.isocalendar()[1] != dt.isocalendar()[1]: dt += datetime.timedelta(days=7) next_trading_day = _coerce_datetime( env.add_trading_days( self.td_delta, self.date_func(dt, env), ) ) next_open, next_close = env.get_open_and_close(next_trading_day) self.next_date_start = next_open self.next_date_end = next_close self.next_midnight_timestamp = next_trading_day def should_trigger(self, dt, env): if self.next_date_start is None: # First time this method has been called. Calculate the midnight, # open, and close for the first trigger, which occurs on the week # of the simulation start self.calculate_start_and_end(dt, env) # If we've passed the trigger, calculate the next one if dt > self.next_date_end: self.calculate_start_and_end(self.next_date_end + datetime.timedelta(days=7), env) # if the given dt is within the next matching day, return true. if self.next_date_start <= dt <= self.next_date_end or \ dt == self.next_midnight_timestamp: return True return False class NthTradingDayOfWeek(TradingDayOfWeekRule): """ A rule that triggers on the nth trading day of the week. This is zero-indexed, n=0 is the first trading day of the week. """ @staticmethod def get_first_trading_day_of_week(dt, env): prev = dt dt = env.previous_trading_day(dt) # If we're on the first trading day of the TradingEnvironment, # calling previous_trading_day on it will return None, which # will blow up when we try and call .date() on it. The first # trading day of the env is also the first trading day of the # week(in the TradingEnvironment, at least), so just return # that date. if dt is None: return prev while dt.date().weekday() < prev.date().weekday(): prev = dt dt = env.previous_trading_day(dt) if dt is None: return prev if env.is_trading_day(prev): return prev.date() else: return env.next_trading_day(prev).date() date_func = get_first_trading_day_of_week class NDaysBeforeLastTradingDayOfWeek(TradingDayOfWeekRule): """ A rule that triggers n days before the last trading day of the week. """ def __init__(self, n): super(NDaysBeforeLastTradingDayOfWeek, self).__init__(-n) @staticmethod def get_last_trading_day_of_week(dt, env): prev = dt dt = env.next_trading_day(dt) # Traverse forward until we hit a week border, then jump back to the # previous trading day. 
while dt.date().weekday() > prev.date().weekday(): prev = dt dt = env.next_trading_day(dt) if env.is_trading_day(prev): return prev.date() else: return env.previous_trading_day(prev).date() date_func = get_last_trading_day_of_week class NthTradingDayOfMonth(StatelessRule): """ A rule that triggers on the nth trading day of the month. This is zero-indexed, n=0 is the first trading day of the month. """ def __init__(self, n=0): if not 0 <= n < MAX_MONTH_RANGE: raise _out_of_range_error(MAX_MONTH_RANGE) self.td_delta = n self.month = None self.day = None def should_trigger(self, dt, env): return self.get_nth_trading_day_of_month(dt, env) == dt.date() def get_nth_trading_day_of_month(self, dt, env): if self.month == dt.month: # We already computed the day for this month. return self.day if not self.td_delta: self.day = self.get_first_trading_day_of_month(dt, env) else: self.day = env.add_trading_days( self.td_delta, self.get_first_trading_day_of_month(dt, env), ).date() return self.day def get_first_trading_day_of_month(self, dt, env): self.month = dt.month dt = dt.replace(day=1) self.first_day = (dt if env.is_trading_day(dt) else env.next_trading_day(dt)).date() return self.first_day class NDaysBeforeLastTradingDayOfMonth(StatelessRule): """ A rule that triggers n days before the last trading day of the month. """ def __init__(self, n=0): if not 0 <= n < MAX_MONTH_RANGE: raise _out_of_range_error(MAX_MONTH_RANGE) self.td_delta = -n self.month = None self.day = None def should_trigger(self, dt, env): return self.get_nth_to_last_trading_day_of_month(dt, env) == dt.date() def get_nth_to_last_trading_day_of_month(self, dt, env): if self.month == dt.month: # We already computed the last day for this month. return self.day if not self.td_delta: self.day = self.get_last_trading_day_of_month(dt, env) else: self.day = env.add_trading_days( self.td_delta, self.get_last_trading_day_of_month(dt, env), ).date() return self.day def get_last_trading_day_of_month(self, dt, env): self.month = dt.month if dt.month == 12: # Roll the year forward and start in January. year = dt.year + 1 month = 1 else: # Increment the month in the same year. year = dt.year month = dt.month + 1 self.last_day = env.previous_trading_day( dt.replace(year=year, month=month, day=1) ).date() return self.last_day # Stateful rules class StatefulRule(EventRule): """ A stateful rule has state. This rule will give different results for the same datetimes depending on the internal state that this holds. StatefulRules wrap other rules as state transformers. """ def __init__(self, rule=None): self.rule = rule or Always() def new_should_trigger(self, callable_): """ Replace the should trigger implementation for the current rule. 
""" self.should_trigger = callable_ class OncePerDay(StatefulRule): def __init__(self, rule=None): self.triggered = False self.date = None self.next_date = None super(OncePerDay, self).__init__(rule) def should_trigger(self, dt, env): if self.date is None or dt >= self.next_date: # initialize or reset for new date self.triggered = False self.date = dt # record the timestamp for the next day, so that we can use it # to know if we've moved to the next day self.next_date = dt + pd.Timedelta(1, unit="d") if not self.triggered and self.rule.should_trigger(dt, env): self.triggered = True return True # Factory API class date_rules(object): every_day = Always @staticmethod def month_start(days_offset=0): return NthTradingDayOfMonth(n=days_offset) @staticmethod def month_end(days_offset=0): return NDaysBeforeLastTradingDayOfMonth(n=days_offset) @staticmethod def week_start(days_offset=0): return NthTradingDayOfWeek(n=days_offset) @staticmethod def week_end(days_offset=0): return NDaysBeforeLastTradingDayOfWeek(n=days_offset) class time_rules(object): market_open = AfterOpen market_close = BeforeClose def make_eventrule(date_rule, time_rule, half_days=True): """ Constructs an event rule from the factory api. """ if half_days: inner_rule = date_rule & time_rule else: inner_rule = date_rule & time_rule & NotHalfDay() return OncePerDay(rule=inner_rule)
apache-2.0
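Still with the events module in the record above, a hedged sketch of the factory API (date_rules, time_rules, make_eventrule). The `events` import name is again an assumption, and `env` stands for a trading-environment object providing get_open_and_close and add_trading_days, which is not part of this record.

import events  # hypothetical import name for the module shown above

# "On the first trading day of each week, 30 minutes into the session."
rule = events.make_eventrule(
    events.date_rules.week_start(days_offset=0),  # NthTradingDayOfWeek(n=0)
    events.time_rules.market_open(minutes=30),    # AfterOpen(minutes=30)
    half_days=True,
)
# rule is an OncePerDay wrapping the composition of NthTradingDayOfWeek(0) and
# AfterOpen(minutes=30); driving rule.should_trigger(dt, env) minute by minute
# fires at most once per day, on the minute where both composed rules agree.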
arizona-phonological-imaging-lab/Autotrace
under-development/a3/lib.py
1
5947
#!/usr/bin/env python3 import os import logging from glob import glob import fnmatch import h5py from PIL import Image import numpy as np from .roi import ROI def get_from_files(d,path,roi,scale=1,n_points=32,buff=512,blacklist=[]): """Create an hdf5 dataset from a folder of images and traces Tries to match names of traces with names of images. Args: d (str): The path of a folder. The folder is recursively searched. path (str): Where to save the dataset Any existing file will be overwritten without warning roi (ROI): The part of each image to extract. scale (numeric, optional): A factor by which to scale the images. Defaults to 1 (no scaling). A better setting might be 0.1 n_points (int, optional): The number of points in each trace Defaults to 32 buff (int, optional): Number of images to buffer before writing Defaults to 512 blacklist (container): Set of image filenames to ignore This is particularly useful for making disjoint training / testing datasets Defaults to the empty list (i.e. nothing excluded) """ images = [] traces = [] names = [] roi = ROI(roi) roi_s = roi.scale(scale) if os.path.exists(path): os.remove(path) hp = h5py.File(path,'w') hp.create_dataset('image', (0,1) + roi_s.shape, maxshape = (None,1) + roi_s.shape, chunks = (buff,1) + roi_s.shape, compression='gzip') hp.create_dataset('trace', (0,n_points,1,1), maxshape = (None,n_points,1,1), chunks = (buff,n_points,1,1), compression='gzip') try: unicode except NameError: unicode = str hp.create_dataset('name', (0,), maxshape = (None,), chunks = (buff,), dtype=h5py.special_dtype(vlen=unicode), compression='gzip') # traverse d for root,__,filenames in os.walk(d): # look for hand-traced traces for filename in fnmatch.filter(filenames,'*.ghp.traced.txt'): # because it matched the above fnmatch, we can assume it # ends with '.ghp.traced.txt' and remove that ending.
# the rest is our target base = filename[:-len('.ghp.traced.txt')] # look for our target f = None if os.path.isfile(os.path.join(root,base)): f = os.path.join(root,base) else: g = glob(os.path.join(root,'..','[sS]ubject*','IMAGES',base)) if g: f = g[0] # if we found it, then put it and our trace in the list if f: if os.path.basename(f) not in blacklist: image = image_from_file(f,roi,scale) trace = trace_from_file(os.path.join(root,filename), roi,n_points) try: if image.any() and trace.any(): images.append(image) traces.append(trace) names.append( os.path.basename(f) ) except AttributeError: logging.error("%s %s" % (image, trace)) raise else: logging.debug("excluding file %s" % (os.path.basename(f))) if len(images) >= buff: s = hp['image'].shape[0] images_add = np.array(images[:buff],dtype='float32') traces_add = np.array(traces[:buff],dtype='float32') hp['image'].resize(s+buff,0) hp['image'][s:] = images_add hp['trace'].resize(s+buff,0) hp['trace'][s:] = traces_add hp['name'].resize(s+buff,0) hp['name'][s:] = names[:buff] images = images[buff:] traces = traces[buff:] names = names[buff:] logging.info( "image: %s trace: %s name %s" % (hp['image'].shape, hp['trace'].shape, hp['name'].shape)) logging.info( "image: %s trace: %s name %s" % (hp['image'].shape, hp['trace'].shape, hp['name'].shape)) hp.close() def image_from_file(f,roi,scale=.01): """Extract a properly scaled section of an image Args: f (str): The path to an image roi (ROI): The part of the image to extract scale (numeric, optional): A factor by which to scale the image. Defaults to .01 """ roi = ROI(roi) roi_scale = roi.scale(scale) img = Image.open(f) img = img.convert('L') img.thumbnail((img.size[0] * scale, img.size[1] * scale)) img = np.array(img,dtype='float32') img = img / 255 img = np.array(img[roi_scale.slice],dtype='float32') img = img.reshape(1,img.shape[0],img.shape[1]) return img def trace_from_file(fname,roi,n_points): """Extract a trace from a trace file Uses a linear interpolation of the trace to extract evenly-spaced points Args: fname (str): The path to a trace file. roi (ROI): The space across which to evenly space the points n_points (int): The number of points to extract """ roi = ROI(roi) gold_xs = [] gold_ys = [] with open(fname) as f: for l in f: l = l.split() if int(l[0]) > 0: gold_xs.append(float(l[1])) gold_ys.append(float(l[2])) gold_xs = np.array(gold_xs,dtype='float32') gold_ys = np.array(gold_ys,dtype='float32') if len(gold_xs) > 0: trace = np.interp(roi.domain(n_points),gold_xs,gold_ys,left=0,right=0) trace = trace.reshape((n_points,1,1)) trace[trace==0] = roi.offset[0] trace = (trace - roi.offset[0]) / (roi.height) else: return np.array(0) if trace.sum() > 0 : return trace else: return np.array(0)
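The interpolation step in trace_from_file above can be illustrated without the ROI class (which lives in the package's roi module and is not shown in this record). The ROI geometry below, the x-domain, offset and height, is invented purely for illustration.

import numpy as np

gold_xs = np.array([120.0, 180.0, 260.0, 330.0], dtype='float32')  # traced x
gold_ys = np.array([410.0, 380.0, 360.0, 395.0], dtype='float32')  # traced y
n_points = 32
domain = np.linspace(100.0, 350.0, n_points)  # stand-in for roi.domain(n_points)

trace = np.interp(domain, gold_xs, gold_ys, left=0, right=0)
offset_y, height = 200.0, 300.0               # stand-in for roi.offset[0], roi.height
trace[trace == 0] = offset_y                  # untraced columns collapse to the ROI edge
trace = (trace - offset_y) / height           # normalize within the ROI, as above
print(trace.shape, float(trace.min()), float(trace.max()))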
mit
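Reading back the HDF5 file produced by get_from_files is straightforward; the dataset names ('image', 'trace', 'name') come from the create_dataset calls above, while 'training.hdf5' is a placeholder path.

import h5py

with h5py.File('training.hdf5', 'r') as hp:
    print(hp['image'].shape)  # (N, 1, *scaled ROI shape)
    print(hp['trace'].shape)  # (N, n_points, 1, 1)
    print(hp['name'][0])      # filename of the first image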
florian-f/sklearn
sklearn/datasets/mldata.py
3
6872
"""Automatically download MLdata datasets.""" # Copyright (c) 2011 Pietro Berkes # License: Simplified BSD import os from os.path import join, exists import re import numbers try: # Python 2 from urllib2 import HTTPError from urllib2 import quote from urllib2 import urlopen except ImportError: # Python 3+ from urllib.error import HTTPError from urllib.parse import quote from urllib.request import urlopen import scipy as sp from scipy import io from shutil import copyfileobj from .base import get_data_home, Bunch MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s" def mldata_filename(dataname): """Convert a raw name for a data set in a mldata.org filename.""" dataname = dataname.lower().replace(' ', '-') return re.sub(r'[().]', '', dataname) def fetch_mldata(dataname, target_name='label', data_name='data', transpose_data=True, data_home=None): """Fetch an mldata.org data set If the file does not exist yet, it is downloaded from mldata.org . mldata.org does not have an enforced convention for storing data or naming the columns in a data set. The default behavior of this function works well with the most common cases: 1) data values are stored in the column 'data', and target values in the column 'label' 2) alternatively, the first column stores target values, and the second data values 3) the data array is stored as `n_features x n_samples` , and thus needs to be transposed to match the `sklearn` standard Keyword arguments allow to adapt these defaults to specific data sets (see parameters `target_name`, `data_name`, `transpose_data`, and the examples below). mldata.org data sets may have multiple columns, which are stored in the Bunch object with their original name. Parameters ---------- dataname: Name of the data set on mldata.org, e.g.: "leukemia", "Whistler Daily Snowfall", etc. The raw name is automatically converted to a mldata.org URL . target_name: optional, default: 'label' Name or index of the column containing the target values. data_name: optional, default: 'data' Name or index of the column containing the data. transpose_data: optional, default: True If True, transpose the downloaded data array. data_home: optional, default: None Specify another download and cache folder for the data sets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'DESCR', the full description of the dataset, and 'COL_NAMES', the original names of the dataset columns. Examples -------- Load the 'iris' dataset from mldata.org: >>> from sklearn.datasets.mldata import fetch_mldata >>> iris = fetch_mldata('iris') >>> iris.target[0] 1 >>> print(iris.data[0]) [-0.555556 0.25 -0.864407 -0.916667] Load the 'leukemia' dataset from mldata.org, which needs to be transposed to respects the sklearn axes convention: >>> leuk = fetch_mldata('leukemia', transpose_data=True) >>> print(leuk.data.shape[0]) 72 Load an alternative 'iris' dataset, which has different names for the columns: >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1, ... data_name=0) >>> iris3 = fetch_mldata('datasets-UCI iris', ... 
target_name='class', data_name='double0') """ # normalize dataset name dataname = mldata_filename(dataname) # check if this data set has been already downloaded data_home = get_data_home(data_home=data_home) data_home = join(data_home, 'mldata') if not exists(data_home): os.makedirs(data_home) matlab_name = dataname + '.mat' filename = join(data_home, matlab_name) # if the file does not exist, download it if not exists(filename): urlname = MLDATA_BASE_URL % quote(dataname) try: mldata_url = urlopen(urlname) except HTTPError as e: if e.code == 404: e.msg = "Dataset '%s' not found on mldata.org." % dataname raise # store Matlab file try: with open(filename, 'w+b') as matlab_file: copyfileobj(mldata_url, matlab_file) except: os.remove(filename) raise mldata_url.close() # load dataset matlab file with open(filename, 'rb') as matlab_file: matlab_dict = io.loadmat(matlab_file, struct_as_record=True) # -- extract data from matlab_dict # flatten column names col_names = [str(descr[0]) for descr in matlab_dict['mldata_descr_ordering'][0]] # if target or data names are indices, transform then into names if isinstance(target_name, numbers.Integral): target_name = col_names[target_name] if isinstance(data_name, numbers.Integral): data_name = col_names[data_name] # rules for making sense of the mldata.org data format # (earlier ones have priority): # 1) there is only one array => it is "data" # 2) there are multiple arrays # a) copy all columns in the bunch, using their column name # b) if there is a column called `target_name`, set "target" to it, # otherwise set "target" to first column # c) if there is a column called `data_name`, set "data" to it, # otherwise set "data" to second column dataset = {'DESCR': 'mldata.org dataset: %s' % dataname, 'COL_NAMES': col_names} # 1) there is only one array => it is considered data if len(col_names) == 1: data_name = col_names[0] dataset['data'] = matlab_dict[data_name] # 2) there are multiple arrays else: for name in col_names: dataset[name] = matlab_dict[name] if target_name in col_names: del dataset[target_name] dataset['target'] = matlab_dict[target_name] else: del dataset[col_names[0]] dataset['target'] = matlab_dict[col_names[0]] if data_name in col_names: del dataset[data_name] dataset['data'] = matlab_dict[data_name] else: del dataset[col_names[1]] dataset['data'] = matlab_dict[col_names[1]] # set axes to sklearn conventions if transpose_data: dataset['data'] = dataset['data'].T if 'target' in dataset: if not sp.sparse.issparse(dataset['target']): dataset['target'] = dataset['target'].squeeze() return Bunch(**dataset)
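The name normalization done by mldata_filename above is easy to check in isolation; the function is copied verbatim, and the third input is a made-up name (mldata.org itself has long been offline, so the download path is mostly of historical interest).

import re

def mldata_filename(dataname):
    dataname = dataname.lower().replace(' ', '-')
    return re.sub(r'[().]', '', dataname)

print(mldata_filename('Whistler Daily Snowfall'))  # whistler-daily-snowfall
print(mldata_filename('datasets-UCI iris'))        # datasets-uci-iris
print(mldata_filename('Wine Quality (v1.0)'))      # wine-quality-v10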
bsd-3-clause
anntzer/scikit-learn
examples/cluster/plot_dbscan.py
6
2608
# -*- coding: utf-8 -*- """ =================================== Demo of DBSCAN clustering algorithm =================================== DBSCAN (Density-Based Spatial Clustering of Applications with Noise) finds core samples of high density and expands clusters from them. This algorithm is good for data which contains clusters of similar density. """ import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.datasets import make_blobs from sklearn.preprocessing import StandardScaler # %% # Generate sample data # -------------------- centers = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs( n_samples=750, centers=centers, cluster_std=0.4, random_state=0 ) X = StandardScaler().fit_transform(X) # %% # Compute DBSCAN # -------------- db = DBSCAN(eps=0.3, min_samples=10).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) print("Estimated number of clusters: %d" % n_clusters_) print("Estimated number of noise points: %d" % n_noise_) print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)) print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)) print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)) print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels)) print( "Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels_true, labels) ) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels)) # %% # Plot result # ----------- import matplotlib.pyplot as plt # Black removed and is used for noise instead. unique_labels = set(labels) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. col = [0, 0, 0, 1] class_member_mask = labels == k xy = X[class_member_mask & core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=14, ) xy = X[class_member_mask & ~core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=6, ) plt.title("Estimated number of clusters: %d" % n_clusters_) plt.show()
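A small follow-up that could be appended to the script above: summarize per-cluster sizes and the noise fraction from the fitted labels (it reuses the labels array computed by DBSCAN above).

import numpy as np

sizes = {int(k): int(np.sum(labels == k)) for k in sorted(set(labels)) if k != -1}
print("cluster sizes:", sizes)
print("noise fraction: %0.3f" % np.mean(labels == -1))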
bsd-3-clause
anntzer/scikit-learn
sklearn/metrics/_plot/precision_recall_curve.py
8
13487
from sklearn.base import is_classifier from .base import _get_response from .. import average_precision_score from .. import precision_recall_curve from .._base import _check_pos_label_consistency from .._classification import check_consistent_length from ...utils import check_matplotlib_support class PrecisionRecallDisplay: """Precision Recall visualization. It is recommend to use :func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` or :func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` to create a :class:`~sklearn.metrics.PredictionRecallDisplay`. All parameters are stored as attributes. Read more in the :ref:`User Guide <visualizations>`. Parameters ---------- precision : ndarray Precision values. recall : ndarray Recall values. average_precision : float, default=None Average precision. If None, the average precision is not shown. estimator_name : str, default=None Name of estimator. If None, then the estimator name is not shown. pos_label : str or int, default=None The class considered as the positive class. If None, the class will not be shown in the legend. .. versionadded:: 0.24 Attributes ---------- line_ : matplotlib Artist Precision recall curve. ax_ : matplotlib Axes Axes with precision recall curve. figure_ : matplotlib Figure Figure containing the curve. See Also -------- precision_recall_curve : Compute precision-recall pairs for different probability thresholds. PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given a binary classifier. PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve using predictions from a binary classifier. Notes ----- The average precision (cf. :func:`~sklearn.metrics.average_precision`) in scikit-learn is computed without any interpolation. To be consistent with this metric, the precision-recall curve is plotted without any interpolation as well (step-wise style). You can change this style by passing the keyword argument `drawstyle="default"` in :meth:`plot`, :meth:`from_estimator`, or :meth:`from_predictions`. However, the curve will not be strictly consistent with the reported average precision. Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import (precision_recall_curve, ... PrecisionRecallDisplay) >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split(X, y, ... random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) SVC(random_state=0) >>> predictions = clf.predict(X_test) >>> precision, recall, _ = precision_recall_curve(y_test, predictions) >>> disp = PrecisionRecallDisplay(precision=precision, recall=recall) >>> disp.plot() <...> >>> plt.show() """ def __init__( self, precision, recall, *, average_precision=None, estimator_name=None, pos_label=None, ): self.estimator_name = estimator_name self.precision = precision self.recall = recall self.average_precision = average_precision self.pos_label = pos_label def plot(self, ax=None, *, name=None, **kwargs): """Plot visualization. Extra keyword arguments will be passed to matplotlib's `plot`. Parameters ---------- ax : Matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. name : str, default=None Name of precision recall curve for labeling. If `None`, use `estimator_name` if not `None`, otherwise no labeling is shown. 
**kwargs : dict Keyword arguments to be passed to matplotlib's `plot`. Returns ------- display : :class:`~sklearn.metrics.PrecisionRecallDisplay` Object that stores computed values. Notes ----- The average precision (cf. :func:`~sklearn.metrics.average_precision`) in scikit-learn is computed without any interpolation. To be consistent with this metric, the precision-recall curve is plotted without any interpolation as well (step-wise style). You can change this style by passing the keyword argument `drawstyle="default"`. However, the curve will not be strictly consistent with the reported average precision. """ check_matplotlib_support("PrecisionRecallDisplay.plot") name = self.estimator_name if name is None else name line_kwargs = {"drawstyle": "steps-post"} if self.average_precision is not None and name is not None: line_kwargs["label"] = f"{name} (AP = {self.average_precision:0.2f})" elif self.average_precision is not None: line_kwargs["label"] = f"AP = {self.average_precision:0.2f}" elif name is not None: line_kwargs["label"] = name line_kwargs.update(**kwargs) import matplotlib.pyplot as plt if ax is None: fig, ax = plt.subplots() (self.line_,) = ax.plot(self.recall, self.precision, **line_kwargs) info_pos_label = ( f" (Positive label: {self.pos_label})" if self.pos_label is not None else "" ) xlabel = "Recall" + info_pos_label ylabel = "Precision" + info_pos_label ax.set(xlabel=xlabel, ylabel=ylabel) if "label" in line_kwargs: ax.legend(loc="lower left") self.ax_ = ax self.figure_ = ax.figure return self @classmethod def from_estimator( cls, estimator, X, y, *, sample_weight=None, pos_label=None, response_method="auto", name=None, ax=None, **kwargs, ): """Plot precision-recall curve given an estimator and some data. Parameters ---------- estimator : estimator instance Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. pos_label : str or int, default=None The class considered as the positive class when computing the precision and recall metrics. By default, `estimators.classes_[1]` is considered as the positive class. response_method : {'predict_proba', 'decision_function', 'auto'}, \ default='auto' Specifies whether to use :term:`predict_proba` or :term:`decision_function` as the target response. If set to 'auto', :term:`predict_proba` is tried first and if it does not exist :term:`decision_function` is tried next. name : str, default=None Name for labeling curve. If `None`, no name is used. ax : matplotlib axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. **kwargs : dict Keyword arguments to be passed to matplotlib's `plot`. Returns ------- display : :class:`~sklearn.metrics.PrecisionRecallDisplay` See Also -------- PrecisionRecallDisplay.from_predictions : Plot precision-recall curve using estimated probabilities or output of decision function. Notes ----- The average precision (cf. :func:`~sklearn.metrics.average_precision`) in scikit-learn is computed without any interpolation. To be consistent with this metric, the precision-recall curve is plotted without any interpolation as well (step-wise style). You can change this style by passing the keyword argument `drawstyle="default"`. However, the curve will not be strictly consistent with the reported average precision. 
Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import PrecisionRecallDisplay >>> from sklearn.model_selection import train_test_split >>> from sklearn.linear_model import LogisticRegression >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = LogisticRegression() >>> clf.fit(X_train, y_train) LogisticRegression() >>> PrecisionRecallDisplay.from_estimator( ... clf, X_test, y_test) <...> >>> plt.show() """ method_name = f"{cls.__name__}.from_estimator" check_matplotlib_support(method_name) if not is_classifier(estimator): raise ValueError(f"{method_name} only supports classifiers") y_pred, pos_label = _get_response( X, estimator, response_method, pos_label=pos_label, ) name = name if name is not None else estimator.__class__.__name__ return cls.from_predictions( y, y_pred, sample_weight=sample_weight, name=name, pos_label=pos_label, ax=ax, **kwargs, ) @classmethod def from_predictions( cls, y_true, y_pred, *, sample_weight=None, pos_label=None, name=None, ax=None, **kwargs, ): """Plot precision-recall curve given binary class predictions. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. y_pred : array-like of shape (n_samples,) Estimated probabilities or output of decision function. sample_weight : array-like of shape (n_samples,), default=None Sample weights. pos_label : str or int, default=None The class considered as the positive class when computing the precision and recall metrics. name : str, default=None Name for labeling curve. If `None`, name will be set to `"Classifier"`. ax : matplotlib axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. **kwargs : dict Keyword arguments to be passed to matplotlib's `plot`. Returns ------- display : :class:`~sklearn.metrics.PrecisionRecallDisplay` See Also -------- PrecisionRecallDisplay.from_estimator : Plot precision-recall curve using an estimator. Notes ----- The average precision (cf. :func:`~sklearn.metrics.average_precision`) in scikit-learn is computed without any interpolation. To be consistent with this metric, the precision-recall curve is plotted without any interpolation as well (step-wise style). You can change this style by passing the keyword argument `drawstyle="default"`. However, the curve will not be strictly consistent with the reported average precision. Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import PrecisionRecallDisplay >>> from sklearn.model_selection import train_test_split >>> from sklearn.linear_model import LogisticRegression >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = LogisticRegression() >>> clf.fit(X_train, y_train) LogisticRegression() >>> y_pred = clf.predict_proba(X_test)[:, 1] >>> PrecisionRecallDisplay.from_predictions( ... 
y_test, y_pred) <...> >>> plt.show() """ check_matplotlib_support(f"{cls.__name__}.from_predictions") check_consistent_length(y_true, y_pred, sample_weight) pos_label = _check_pos_label_consistency(pos_label, y_true) precision, recall, _ = precision_recall_curve( y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight ) average_precision = average_precision_score( y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight ) name = name if name is not None else "Classifier" viz = PrecisionRecallDisplay( precision=precision, recall=recall, average_precision=average_precision, estimator_name=name, pos_label=pos_label, ) return viz.plot(ax=ax, name=name, **kwargs)
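One usage pattern not spelled out in the docstrings above: from_estimator and plot both accept an existing Axes, so several precision-recall curves can share one figure. The two classifiers below are illustrative choices, not anything prescribed by the source.

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import PrecisionRecallDisplay
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

fig, ax = plt.subplots()
for clf in (LogisticRegression(max_iter=1000), RandomForestClassifier(random_state=0)):
    clf.fit(X_train, y_train)
    PrecisionRecallDisplay.from_estimator(clf, X_test, y_test, ax=ax)
ax.set_title("Precision-Recall curves on shared axes")
plt.show()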
bsd-3-clause
chaen/DIRAC
Resources/Catalog/FileCatalogClient.py
3
23994
""" The FileCatalogClient is a class representing the client of the DIRAC File Catalog """ import os from DIRAC import S_OK, S_ERROR from DIRAC.Core.DISET.TransferClient import TransferClient from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOMSAttributeForGroup, getDNForUsername from DIRAC.Resources.Catalog.Utilities import checkCatalogArguments from DIRAC.Resources.Catalog.FileCatalogClientBase import FileCatalogClientBase __RCSID__ = "$Id$" class FileCatalogClient(FileCatalogClientBase): """ Client code to the DIRAC File Catalogue """ # The list of methods below is defining the client interface READ_METHODS = FileCatalogClientBase.READ_METHODS + \ ['isFile', 'getFileMetadata', 'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas', 'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents', 'getLFNForPFN', 'getLFNForGUID', 'findFilesByMetadata', 'getMetadataFields', 'findDirectoriesByMetadata', 'getReplicasByMetadata', 'findFilesByMetadataDetailed', 'findFilesByMetadataWeb', 'getCompatibleMetadata', 'getMetadataSet', 'getDatasets', 'getFileDescendents', 'getFileAncestors', 'getDirectoryUserMetadata', 'getFileUserMetadata', 'checkDataset', 'getDatasetParameters', 'getDatasetFiles', 'getDatasetAnnotation'] WRITE_METHODS = [ 'createLink', 'removeLink', 'addFile', 'setFileStatus', 'addReplica', 'removeReplica', 'removeFile', 'setReplicaStatus', 'setReplicaHost', 'setReplicaProblematic', 'createDirectory', 'setDirectoryStatus', 'removeDirectory', 'changePathMode', 'changePathOwner', 'changePathGroup', 'addMetadataField', 'deleteMetadataField', 'setMetadata', 'setMetadataBulk', 'removeMetadata', 'addMetadataSet', 'addDataset', 'addDatasetAnnotation', 'removeDataset', 'updateDataset', 'freezeDataset', 'releaseDataset', 'addUser', 'deleteUser', 'addGroup', 'deleteGroup', 'repairCatalog', 'rebuildDirectoryUsage'] NO_LFN_METHODS = [ 'findFilesByMetadata', 'addMetadataField', 'deleteMetadataField', 'getMetadataFields', 'setMetadata', 'setMetadataBulk', 'removeMetadata', 'getDirectoryUserMetadata', 'findDirectoriesByMetadata', 'getReplicasByMetadata', 'findFilesByMetadataDetailed', 'findFilesByMetadataWeb', 'getCompatibleMetadata', 'addMetadataSet', 'getMetadataSet', 'getFileUserMetadata', 'getLFNForGUID', 'addUser', 'deleteUser', 'addGroup', 'deleteGroup', 'repairCatalog', 'rebuildDirectoryUsage'] ADMIN_METHODS = ['addUser', 'deleteUser', 'addGroup', 'deleteGroup', 'getUsers', 'getGroups', 'getCatalogCounters', 'repairCatalog', 'rebuildDirectoryUsage'] def __init__(self, url=None, **kwargs): """ Constructor function. 
""" self.serverURL = 'DataManagement/FileCatalog' if not url else url super(FileCatalogClient, self).__init__(self.serverURL, **kwargs) ################################################################################## # ################################################################################## @checkCatalogArguments def getReplicas(self, lfns, allStatus=False, timeout=120): """ Get the replicas of the given files """ rpcClient = self._getRPC(timeout=timeout) result = rpcClient.getReplicas(lfns, allStatus) if not result['OK']: return result vo = getVOfromProxyGroup().get('Value', None) lfnDict = result['Value'] seDict = result['Value'].get('SEPrefixes', {}) for lfn in lfnDict['Successful']: for se in lfnDict['Successful'][lfn]: if not lfnDict['Successful'][lfn][se]: # The PFN was not returned, construct it on the fly # For some VO's the prefix can be non-standard voPrefix = seDict.get("VOPrefix", {}).get(se, {}).get(vo) sePrefix = seDict.get(se, '') prefix = voPrefix if voPrefix else sePrefix lfnDict['Successful'][lfn][se] = prefix + lfn return S_OK(lfnDict) @checkCatalogArguments def setReplicaProblematic(self, lfns, revert=False): """ Set replicas to problematic. :param lfn lfns: has to be formated this way : { lfn : { se1 : pfn1, se2 : pfn2, ...}, ...} :param revert: If True, remove the problematic flag :return: { successful : { lfn : [ ses ] } : failed : { lfn : { se : msg } } } """ # This method does a batch treatment because the setReplicaStatus can only take one replica per lfn at once # # Illustration : # # lfns {'L2': {'S1': 'P3'}, 'L3': {'S3': 'P5', 'S2': 'P4', 'S4': 'P6'}, 'L1': {'S2': 'P2', 'S1': 'P1'}} # # loop1: lfnSEs {'L2': ['S1'], 'L3': ['S3', 'S2', 'S4'], 'L1': ['S2', 'S1']} # loop1 : batch {'L2': {'Status': 'P', 'SE': 'S1', 'PFN': 'P3'}, # 'L3': {'Status': 'P', 'SE': 'S4', 'PFN': 'P6'}, # 'L1': {'Status': 'P', 'SE': 'S1', 'PFN': 'P1'}} # # loop2: lfnSEs {'L2': [], 'L3': ['S3', 'S2'], 'L1': ['S2']} # loop2 : batch {'L3': {'Status': 'P', 'SE': 'S2', 'PFN': 'P4'}, 'L1': {'Status': 'P', 'SE': 'S2', 'PFN': 'P2'}} # # loop3: lfnSEs {'L3': ['S3'], 'L1': []} # loop3 : batch {'L3': {'Status': 'P', 'SE': 'S3', 'PFN': 'P5'}} # # loop4: lfnSEs {'L3': []} # loop4 : batch {} successful = {} failed = {} status = 'AprioriGood' if revert else 'Trash' # { lfn : [ se1, se2, ...], ...} lfnsSEs = dict((lfn, [se for se in lfns[lfn]]) for lfn in lfns) while lfnsSEs: # { lfn : { 'SE' : se1, 'PFN' : pfn1, 'Status' : status }, ... 
} batch = {} for lfn in lfnsSEs.keys(): # If there are still some Replicas (SE) for the given LFN, we put it in the next batch # else we remove the entry from the lfnsSEs dict if lfnsSEs[lfn]: se = lfnsSEs[lfn].pop() batch[lfn] = {'SE': se, 'PFN': lfns[lfn][se], 'Status': status} else: del lfnsSEs[lfn] # Happens when there is nothing to treat anymore if not batch: break res = self.setReplicaStatus(batch) if not res['OK']: for lfn in batch: failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Message'] continue for lfn in res['Value']['Failed']: failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Value']['Failed'][lfn] for lfn in res['Value']['Successful']: successful.setdefault(lfn, []).append(batch[lfn]['SE']) return S_OK({'Successful': successful, 'Failed': failed}) @checkCatalogArguments def listDirectory(self, lfn, verbose=False, timeout=120): """ List the given directory's contents """ rpcClient = self._getRPC(timeout=timeout) result = rpcClient.listDirectory(lfn, verbose) if not result['OK']: return result # Force returned directory entries to be LFNs for entryType in ['Files', 'SubDirs', 'Links']: for path in result['Value']['Successful']: entryDict = result['Value']['Successful'][path][entryType] for fname in entryDict.keys(): detailsDict = entryDict.pop(fname) lfn = os.path.join(path, os.path.basename(fname)) entryDict[lfn] = detailsDict return result @checkCatalogArguments def getDirectoryMetadata(self, lfns, timeout=120): ''' Get standard directory metadata ''' rpcClient = self._getRPC(timeout=timeout) result = rpcClient.getDirectoryMetadata(lfns) if not result['OK']: return result # Add some useful fields for path in result['Value']['Successful']: owner = result['Value']['Successful'][path]['Owner'] group = result['Value']['Successful'][path]['OwnerGroup'] res = getDNForUsername(owner) if res['OK']: result['Value']['Successful'][path]['OwnerDN'] = res['Value'][0] else: result['Value']['Successful'][path]['OwnerDN'] = '' result['Value']['Successful'][path]['OwnerRole'] = getVOMSAttributeForGroup(group) return result @checkCatalogArguments def removeDirectory(self, lfn, recursive=False, timeout=120): """ Remove the directory from the File Catalog. The recursive keyword is for the ineterface. 
""" rpcClient = self._getRPC(timeout=timeout) return rpcClient.removeDirectory(lfn) @checkCatalogArguments def getDirectoryReplicas(self, lfns, allStatus=False, timeout=120): """ Find all the given directories' replicas """ rpcClient = self._getRPC(timeout=timeout) result = rpcClient.getDirectoryReplicas(lfns, allStatus) if not result['OK']: return result seDict = result['Value'].get('SEPrefixes', {}) for path in result['Value']['Successful']: pathDict = result['Value']['Successful'][path] for fname in pathDict.keys(): detailsDict = pathDict.pop(fname) lfn = '%s/%s' % (path, os.path.basename(fname)) for se in detailsDict: if not detailsDict[se] and se in seDict: detailsDict[se] = seDict[se] + lfn pathDict[lfn] = detailsDict return result def findFilesByMetadata(self, metaDict, path='/', timeout=120): """ Find files given the meta data query and the path """ rpcClient = self._getRPC(timeout=timeout) result = rpcClient.findFilesByMetadata(metaDict, path) if not result['OK']: return result if isinstance(result['Value'], list): return result elif isinstance(result['Value'], dict): # Process into the lfn list fileList = [] for dir_, fList in result['Value'].items(): for fi in fList: fileList.append(dir_ + '/' + fi) result['Value'] = fileList return result else: return S_ERROR('Illegal return value type %s' % type(result['Value'])) def getFileUserMetadata(self, path, timeout=120): """Get the meta data attached to a file, but also to the its corresponding directory """ directory = "/".join(path.split("/")[:-1]) rpcClient = self._getRPC(timeout=timeout) result = rpcClient.getFileUserMetadata(path) if not result['OK']: return result fmeta = result['Value'] result = rpcClient.getDirectoryUserMetadata(directory) if not result['OK']: return result fmeta.update(result['Value']) return S_OK(fmeta) ######################################################################## # Path operations (not updated) # @checkCatalogArguments def changePathOwner(self, lfns, recursive=False, timeout=120): """ Get replica info for the given list of LFNs """ return self._getRPC(timeout=timeout).changePathOwner(lfns, recursive) @checkCatalogArguments def changePathGroup(self, lfns, recursive=False, timeout=120): """ Get replica info for the given list of LFNs """ return self._getRPC(timeout=timeout).changePathGroup(lfns, recursive) @checkCatalogArguments def changePathMode(self, lfns, recursive=False, timeout=120): """ Get replica info for the given list of LFNs """ return self._getRPC(timeout=timeout).changePathMode(lfns, recursive) ######################################################################## # ACL Operations # @checkCatalogArguments def getPathPermissions(self, lfns, timeout=120): """ Determine the ACL information for a supplied path """ return self._getRPC(timeout=timeout).getPathPermissions(lfns) @checkCatalogArguments def hasAccess(self, paths, opType, timeout=120): """ Determine if the given op can be performed on the paths The OpType is all the operations exported """ return self._getRPC(timeout=timeout).hasAccess(paths, opType) ################################################################### # # User/Group write operations # def addUser(self, userName, timeout=120): """ Add a new user to the File Catalog """ return self._getRPC(timeout=timeout).addUser(userName) def deleteUser(self, userName, timeout=120): """ Delete user from the File Catalog """ return self._getRPC(timeout=timeout).deleteUser(userName) def addGroup(self, groupName, timeout=120): """ Add a new group to the File Catalog """ return 
self._getRPC(timeout=timeout).addGroup(groupName) def deleteGroup(self, groupName, timeout=120): """ Delete group from the File Catalog """ return self._getRPC(timeout=timeout).deleteGroup(groupName) ################################################################### # # User/Group read operations # def getUsers(self, timeout=120): """ Get all the users defined in the File Catalog """ return self._getRPC(timeout=timeout).getUsers() def getGroups(self, timeout=120): """ Get all the groups defined in the File Catalog """ return self._getRPC(timeout=timeout).getGroups() ######################################################################## # # Path read operations # @checkCatalogArguments def exists(self, lfns, timeout=120): """ Check whether the supplied paths exists """ return self._getRPC(timeout=timeout).exists(lfns) ######################################################################## # # File write operations # @checkCatalogArguments def addFile(self, lfns, timeout=120): """ Register supplied files """ return self._getRPC(timeout=timeout).addFile(lfns) @checkCatalogArguments def removeFile(self, lfns, timeout=120): """ Remove the supplied lfns """ return self._getRPC(timeout=timeout).removeFile(lfns) @checkCatalogArguments def setFileStatus(self, lfns, timeout=120): """ Remove the supplied lfns """ return self._getRPC(timeout=timeout).setFileStatus(lfns) @checkCatalogArguments def addReplica(self, lfns, timeout=120): """ Register supplied replicas """ return self._getRPC(timeout=timeout).addReplica(lfns) @checkCatalogArguments def removeReplica(self, lfns, timeout=120): """ Remove the supplied replicas """ return self._getRPC(timeout=timeout).removeReplica(lfns) @checkCatalogArguments def setReplicaStatus(self, lfns, timeout=120): """ Set the status for the supplied replicas """ return self._getRPC(timeout=timeout).setReplicaStatus(lfns) @checkCatalogArguments def setReplicaHost(self, lfns, timeout=120): """ Change the registered SE for the supplied replicas """ return self._getRPC(timeout=timeout).setReplicaHost(lfns) @checkCatalogArguments def addFileAncestors(self, lfns, timeout=120): """ Add file ancestor information for the given list of LFNs """ return self._getRPC(timeout=timeout).addFileAncestors(lfns) ######################################################################## # # File read operations # @checkCatalogArguments def isFile(self, lfns, timeout=120): """ Check whether the supplied lfns are files """ return self._getRPC(timeout=timeout).isFile(lfns) @checkCatalogArguments def getFileSize(self, lfns, timeout=120): """ Get the size associated to supplied lfns """ return self._getRPC(timeout=timeout).getFileSize(lfns) @checkCatalogArguments def getFileMetadata(self, lfns, timeout=120): """ Get the metadata associated to supplied lfns """ return self._getRPC(timeout=timeout).getFileMetadata(lfns) @checkCatalogArguments def getReplicaStatus(self, lfns, timeout=120): """ Get the status for the supplied replicas """ return self._getRPC(timeout=timeout).getReplicaStatus(lfns) @checkCatalogArguments def getFileAncestors(self, lfns, depths, timeout=120): """ Get the status for the supplied replicas """ return self._getRPC(timeout=timeout).getFileAncestors(lfns, depths) @checkCatalogArguments def getFileDescendents(self, lfns, depths, timeout=120): """ Get the status for the supplied replicas """ return self._getRPC(timeout=timeout).getFileDescendents(lfns, depths) def getLFNForGUID(self, guids, timeout=120): """Get the matching lfns for given guids""" return 
self._getRPC(timeout=timeout).getLFNForGUID(guids) ######################################################################## # # Directory write operations # @checkCatalogArguments def createDirectory(self, lfns, timeout=120): """ Create the supplied directories """ return self._getRPC(timeout=timeout).createDirectory(lfns) ######################################################################## # # Directory read operations # @checkCatalogArguments def isDirectory(self, lfns, timeout=120): """ Determine whether supplied path is a directory """ return self._getRPC(timeout=timeout).isDirectory(lfns) @checkCatalogArguments def getDirectorySize(self, lfns, longOut=False, fromFiles=False, timeout=120): """ Get the size of the supplied directory """ return self._getRPC(timeout=timeout).getDirectorySize(lfns, longOut, fromFiles) ######################################################################## # # Administrative database operations # def getCatalogCounters(self, timeout=120): """ Get the number of registered directories, files and replicas in various tables """ return self._getRPC(timeout=timeout).getCatalogCounters() def rebuildDirectoryUsage(self, timeout=120): """ Rebuild DirectoryUsage table from scratch """ return self._getRPC(timeout=timeout).rebuildDirectoryUsage() def repairCatalog(self, timeout=120): """ Repair the catalog inconsistencies """ return self._getRPC(timeout=timeout).repairCatalog() ######################################################################## # Metadata Catalog Operations # def addMetadataField(self, fieldName, fieldType, metaType='-d', timeout=120): """ Add a new metadata field of the given type """ return self._getRPC(timeout=timeout).addMetadataField(fieldName, fieldType, metaType) def deleteMetadataField(self, fieldName, timeout=120): """ Delete the metadata field """ return self._getRPC(timeout=timeout).deleteMetadataField(fieldName) def getMetadataFields(self, timeout=120): """ Get all the metadata fields """ return self._getRPC(timeout=timeout).getMetadataFields() def setMetadata(self, path, metadatadict, timeout=120): """ Set metadata parameter for the given path """ return self._getRPC(timeout=timeout).setMetadata(path, metadatadict) def setMetadataBulk(self, pathMetadataDict, timeout=120): """ Set metadata parameter for the given path """ return self._getRPC(timeout=timeout).setMetadataBulk(pathMetadataDict) def removeMetadata(self, pathMetadataDict, timeout=120): """ Remove the specified metadata for the given path """ return self._getRPC(timeout=timeout).removeMetadata(pathMetadataDict) def getDirectoryUserMetadata(self, path, timeout=120): """ Get all the metadata valid for the given directory path """ return self._getRPC(timeout=timeout).getDirectoryUserMetadata(path) def findDirectoriesByMetadata(self, metaDict, path='/', timeout=120): """ Find all the directories satisfying the given metadata set """ return self._getRPC(timeout=timeout).findDirectoriesByMetadata(metaDict, path) def getReplicasByMetadata(self, metaDict, path='/', allStatus=False, timeout=120): """ Find all the files satisfying the given metadata set """ return self._getRPC(timeout=timeout).getReplicasByMetadata(metaDict, path, allStatus) def findFilesByMetadataDetailed(self, metaDict, path='/', timeout=120): """ Find all the files satisfying the given metadata set """ return self._getRPC(timeout=timeout).findFilesByMetadataDetailed(metaDict, path) def findFilesByMetadataWeb(self, metaDict, path, startItem, maxItems, timeout=120): """ Find files satisfying the given metadata 
set """ return self._getRPC(timeout=timeout).findFilesByMetadataWeb(metaDict, path, startItem, maxItems) def getCompatibleMetadata(self, metaDict, path='/', timeout=120): """ Get metadata values compatible with the given metadata subset """ return self._getRPC(timeout=timeout).getCompatibleMetadata(metaDict, path) def addMetadataSet(self, setName, setDict, timeout=120): """ Add a new metadata set """ return self._getRPC(timeout=timeout).addMetadataSet(setName, setDict) def getMetadataSet(self, setName, expandFlag, timeout=120): """ Add a new metadata set """ return self._getRPC(timeout=timeout).getMetadataSet(setName, expandFlag) ######################################################################################### # # Dataset manipulation methods # @checkCatalogArguments def addDataset(self, datasets, timeout=120): """ Add a new dynamic dataset defined by its meta query """ return self._getRPC(timeout=timeout).addDataset(datasets) @checkCatalogArguments def addDatasetAnnotation(self, datasetDict, timeout=120): """ Add annotation to an already created dataset """ return self._getRPC(timeout=timeout).addDatasetAnnotation(datasetDict) @checkCatalogArguments def removeDataset(self, datasets, timeout=120): """ Check the given dynamic dataset for changes since its definition """ return self._getRPC(timeout=timeout).removeDataset(datasets) @checkCatalogArguments def checkDataset(self, datasets, timeout=120): """ Check the given dynamic dataset for changes since its definition """ return self._getRPC(timeout=timeout).checkDataset(datasets) @checkCatalogArguments def updateDataset(self, datasets, timeout=120): """ Update the given dynamic dataset for changes since its definition """ return self._getRPC(timeout=timeout).updateDataset(datasets) @checkCatalogArguments def getDatasets(self, datasets, timeout=120): """ Get parameters of the given dynamic dataset as they are stored in the database """ return self._getRPC(timeout=timeout).getDatasets(datasets) @checkCatalogArguments def getDatasetParameters(self, datasets, timeout=120): """ Get parameters of the given dynamic dataset as they are stored in the database """ return self._getRPC(timeout=timeout).getDatasetParameters(datasets) @checkCatalogArguments def getDatasetAnnotation(self, datasets, timeout=120): """ Get annotation of the given datasets """ return self._getRPC(timeout=timeout).getDatasetAnnotation(datasets) @checkCatalogArguments def freezeDataset(self, datasets, timeout=120): """ Freeze the contents of the dataset making it effectively static """ return self._getRPC(timeout=timeout).freezeDataset(datasets) @checkCatalogArguments def releaseDataset(self, datasets, timeout=120): """ Release the contents of the frozen dataset allowing changes in its contents """ return self._getRPC(timeout=timeout).releaseDataset(datasets) @checkCatalogArguments def getDatasetFiles(self, datasets, timeout=120): """ Get lfns in the given dataset two lines ! """ return self._getRPC(timeout=timeout).getDatasetFiles(datasets) ############################################################################# def getSEDump(self, seName, outputFilename): """ Dump the content of an SE in the given file. The file contains a list of [lfn,checksum,size] dumped as csv, separated by '|' :param seName: name of the StorageElement :param outputFilename: path to the file where to dump it :returns: result from the TransferClient """ dfc = TransferClient(self.serverURL) return dfc.receiveFile(outputFilename, seName)
gpl-3.0
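A hedged usage sketch for the client above. It needs a configured DIRAC installation and a valid proxy, so it will not run standalone; the LFN is a placeholder, and the import path is inferred from this record's path field.

from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient

fc = FileCatalogClient()
result = fc.getReplicas(['/someVO/user/s/someone/some_file.txt'])  # placeholder LFN
if not result['OK']:
    print('call failed:', result['Message'])
else:
    for lfn, replicas in result['Value']['Successful'].items():
        print(lfn, '->', sorted(replicas))  # mapping of SE name to PFN/URL
    for lfn, reason in result['Value']['Failed'].items():
        print(lfn, 'failed:', reason)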
pytorch/fairseq
examples/laser/laser_src/laser_transformer.py
1
11947
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Any, Dict, List, Optional from torch import Tensor import torch import torch.nn as nn from fairseq.models import ( FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( base_architecture, Embedding, TransformerModel, TransformerEncoder, TransformerDecoder, ) from fairseq.modules import ( TransformerDecoderLayer, ) logger = logging.getLogger(__name__) @register_model("laser_transformer") class LaserTransformerModel(FairseqEncoderDecoderModel): """Train Transformer for LASER task Requires --task laser """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens=None, tgt_tokens=None, tgt_lengths=None, target_language_id=-1, dataset_name="", ): laser_encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder( prev_output_tokens, laser_encoder_out, lang_id=target_language_id ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--decoder-lang-embed-dim", type=int, metavar="N", help="decoder language embedding dimension", ) @classmethod def build_model(cls, args, task): base_laser_transformer_architecture(args) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 def load_embed_tokens(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) encoder_embed_tokens = load_embed_tokens( task.source_dictionary, args.encoder_embed_dim ) decoder_embed_tokens = load_embed_tokens( task.target_dictionary, args.decoder_embed_dim ) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 encoder = LaserTransformerEncoder( args, task.source_dictionary, encoder_embed_tokens ) decoder = LaserTransformerDecoder( args, task.target_dictionary, decoder_embed_tokens, num_langs=num_langs, lang_embed_dim=args.decoder_lang_embed_dim, ) return cls(encoder, decoder) class LaserTransformerEncoder(TransformerEncoder): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, src_tokens, *args, **kwargs): encoder_out = super().forward(src_tokens, *args, **kwargs) x = encoder_out["encoder_out"][0] # T x B x C padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1) if padding_mask.any(): x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x) # Build the sentence embedding by max-pooling over the encoder outputs sentemb = x.max(dim=0)[0] # The PyTorch Mobile lite interpreter does not support returning NamedTuple in # `forward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None.
return {"sentemb": [sentemb]} # B x C @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Same as the one in transformer.py, with new_sentemb """ if len(encoder_out["sentemb"]) == 0: new_sentemb = [] else: new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)] return { "sentemb": new_sentemb, # B x C } class LaserTransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, *kargs, **kwargs): self.num_langs = kwargs.get("num_langs", 1) self.lang_embed_dim = kwargs.get("lang_embed_dim", 0) kwargs.pop("num_langs", None) kwargs.pop("lang_embed_dim", None) super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True) if self.lang_embed_dim == 0: self.embed_lang = None else: self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim) nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) if self.output_projection is not None: laser_output_embed_dim = ( self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) self.output_projection = nn.Linear( laser_output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=laser_output_embed_dim ** -0.5, ) def build_decoder_layer(self, args, no_encoder_attn=False): decoder_embed_dim = args.decoder_embed_dim args.decoder_embed_dim = ( decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) res = TransformerDecoderLayer(args, no_encoder_attn=True) args.decoder_embed_dim = decoder_embed_dim return res def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, lang_id: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ if alignment_layer is None: alignment_layer = self.num_layers - 1 # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) if self.embed_lang is not None: lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) langemb = self.embed_lang(lang_ids) langemb = langemb.unsqueeze(0) repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * ( len(langemb.shape) - 1 ) x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1) sentemb = encoder_out["sentemb"][0] sentemb = sentemb.unsqueeze(0) repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1) x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, None, None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, lang_id: Optional[int] = None, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ assert lang_id is not None x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, alignment_layer=alignment_layer, alignment_heads=alignment_heads, lang_id=lang_id, ) if not features_only: x = self.output_layer(x) return x, extra @register_model_architecture("laser_transformer", "laser_transformer") def base_laser_transformer_architecture(args): base_architecture(args) args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
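# A minimal standalone sketch (not part of the fairseq sources) of the concatenation
# trick used in LaserTransformerDecoder.extract_features above: the B x C sentence
# embedding produced by the encoder is broadcast across the time dimension and appended
# to every decoder input step. The tensor sizes below are illustrative assumptions.
import torch

T, B, C, sent_dim = 5, 2, 8, 4          # time steps, batch, decoder dim, sentemb dim
x = torch.randn(T, B, C)                # decoder inputs, T x B x C
sentemb = torch.randn(B, sent_dim)      # one embedding per sentence, B x sent_dim

sentemb = sentemb.unsqueeze(0)          # 1 x B x sent_dim
repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1)
x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1)
assert x.shape == (T, B, C + sent_dim)  # every step now sees the sentence embedding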
mit
florian-f/sklearn
examples/linear_model/plot_lasso_coordinate_descent_path.py
4
2823
""" ===================== Lasso and Elastic Net ===================== Lasso and elastic net (L1 and L2 penalisation) implemented using a coordinate descent. The coefficients can be forced to be positive. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD Style. import numpy as np import pylab as pl from sklearn.linear_model import lasso_path, enet_path from sklearn import datasets diabetes = datasets.load_diabetes() X = diabetes.data y = diabetes.target X /= X.std(0) # Standardize data (easier to set the l1_ratio parameter) ############################################################################### # Compute paths eps = 5e-3 # the smaller it is the longer is the path print("Computing regularization path using the lasso...") models = lasso_path(X, y, eps=eps) alphas_lasso = np.array([model.alpha for model in models]) coefs_lasso = np.array([model.coef_ for model in models]) print("Computing regularization path using the positive lasso...") models = lasso_path(X, y, eps=eps, positive=True) alphas_positive_lasso = np.array([model.alpha for model in models]) coefs_positive_lasso = np.array([model.coef_ for model in models]) print("Computing regularization path using the elastic net...") models = enet_path(X, y, eps=eps, l1_ratio=0.8) alphas_enet = np.array([model.alpha for model in models]) coefs_enet = np.array([model.coef_ for model in models]) print("Computing regularization path using the positve elastic net...") models = enet_path(X, y, eps=eps, l1_ratio=0.8, positive=True) alphas_positive_enet = np.array([model.alpha for model in models]) coefs_positive_enet = np.array([model.coef_ for model in models]) ############################################################################### # Display results pl.figure(1) ax = pl.gca() ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k']) l1 = pl.plot(coefs_lasso) l2 = pl.plot(coefs_enet, linestyle='--') pl.xlabel('-Log(lambda)') pl.ylabel('weights') pl.title('Lasso and Elastic-Net Paths') pl.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left') pl.axis('tight') pl.figure(2) ax = pl.gca() ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k']) l1 = pl.plot(coefs_lasso) l2 = pl.plot(coefs_positive_lasso, linestyle='--') pl.xlabel('-Log(lambda)') pl.ylabel('weights') pl.title('Lasso and positive Lasso') pl.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left') pl.axis('tight') pl.figure(3) ax = pl.gca() ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k']) l1 = pl.plot(coefs_enet) l2 = pl.plot(coefs_positive_enet, linestyle='--') pl.xlabel('-Log(lambda)') pl.ylabel('weights') pl.title('Elastic-Net and positive Elastic-Net') pl.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'), loc='lower left') pl.axis('tight') pl.show()
bsd-3-clause
luanjunyi/cortana
model/sk_general/train.py
1
3684
# -*- coding: utf-8 -*-

import sys, os, math
import argparse
import cPickle as pickle
from collections import defaultdict

import numpy as np
import scipy.sparse as sparse
from sklearn import cross_validation
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2

from util.log import _logger
from util import *
from feat.terms.term_categorize import term_category

CLFs = {
    "nb": BernoulliNB(fit_prior = False),
    "sgd": SGDClassifier(penalty="l2", class_weight="auto", n_iter=100),
    "svm_ovr": LinearSVC(loss='l1', penalty="l2", multi_class="ovr", class_weight="auto"),
    "svm_sin": LinearSVC(loss='l1', penalty="l2", multi_class="crammer_singer"),
    "knn": KNeighborsClassifier(n_neighbors=10, weights = 'distance')
}

class Vectorizer(object):
    def __init__(self):
        self.count_vec = TfidfVectorizer(binary = True, ngram_range = (1, 3),
                                         tokenizer = Tokenizer())
        self.last_vec = CountVectorizer(binary = True, ngram_range = (1, 1),
                                        tokenizer = Tokenizer())

    def collect_last_term(self, X):
        X_last = list()
        tokens = self.last_vec.build_tokenizer()
        _logger.debug("Extracting last term for each sentence")
        for sent in X:
            X_last.append(tokens(sent)[-1])
        _logger.debug("Fitting last-term vectorizer")
        return X_last

    def fit(self, X, y = None):
        _logger.debug("Fitting count vectorizer")
        self.count_vec.fit(X)
        X_last = self.collect_last_term(X)
        self.last_vec.fit(X_last)
        return self

    def transform(self, X, y = None):
        #return self.count_vec.transform(X)
        _logger.debug("Doing tfidf transform")
        Xc = self.count_vec.transform(X)
        X_last = self.collect_last_term(X)
        _logger.debug("Doing last term transform")
        Xl = self.last_vec.transform(X_last)
        _logger.debug("stacking features")
        ret = sparse.hstack([Xc, Xl])

        tokens = self.count_vec.build_tokenizer()
        l = list()
        for sent in X:
            terms = tokens(sent)
            l.append(1 if ("__LOCATION__" in terms and "__ORGNIZATION__" in terms) else 0)
        l = np.array(l)
        l.shape = len(l), 1
        ret = sparse.hstack([ret, l])
        _logger.debug("vectorization transform done")
        return ret

if __name__ == "__main__":
    cmd = argparse.ArgumentParser()
    cmd.add_argument("--input", help="path of the training data", default = TRAIN_FILE_PATH)
    cmd.add_argument("--algo", help="algorithm to use", required=True, choices = CLFs.keys())
    args = cmd.parse_args()

    X, y = load_data(args.input)
    _logger.info("training using %s" % args.algo)

    pipeline = Pipeline([
        ("vert", TfidfVectorizer(min_df = 1, binary = True, ngram_range = (1, 3),
                                 tokenizer = Tokenizer())),
        #("vert", Vectorizer()),
        ("clf", CLFs[args.algo]),
    ])

    pipeline.fit(X, y)

    from decode import test
    test(TEST_FILE_PATH, pipeline)

    outpath = "%s.model" % args.algo
    with open(outpath, "w") as outfile:
        pickle.dump(pipeline, outfile)
    _logger.info("Model dumped to %s" % outpath)
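# A minimal sketch of consuming the model dumped above: unpickle the fitted pipeline
# and classify new utterances. The "svm_ovr.model" path and the example sentence are
# illustrative assumptions; any "<algo>.model" file written by this script would do,
# and the custom Tokenizer must be importable when loading.
import cPickle as pickle

with open("svm_ovr.model", "rb") as model_file:
    pipeline = pickle.load(model_file)

print(pipeline.predict(["what is the weather in __LOCATION__ tomorrow"]))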
mit
WilsonWangTHU/clothesDetection
tools/train_svms.py
42
13247
#!/usr/bin/env python # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """ Train post-hoc SVMs using the algorithm and hyper-parameters from traditional R-CNN. """ import _init_paths from fast_rcnn.config import cfg, cfg_from_file from datasets.factory import get_imdb from fast_rcnn.test import im_detect from utils.timer import Timer import caffe import argparse import pprint import numpy as np import numpy.random as npr import cv2 from sklearn import svm import os, sys class SVMTrainer(object): """ Trains post-hoc detection SVMs for all classes using the algorithm and hyper-parameters of traditional R-CNN. """ def __init__(self, net, imdb): self.imdb = imdb self.net = net self.layer = 'fc7' self.hard_thresh = -1.0001 self.neg_iou_thresh = 0.3 dim = net.params['cls_score'][0].data.shape[1] scale = self._get_feature_scale() print('Feature dim: {}'.format(dim)) print('Feature scale: {:.3f}'.format(scale)) self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale) for cls in imdb.classes] def _get_feature_scale(self, num_images=100): TARGET_NORM = 20.0 # Magic value from traditional R-CNN _t = Timer() roidb = self.imdb.roidb total_norm = 0.0 count = 0.0 inds = npr.choice(xrange(self.imdb.num_images), size=num_images, replace=False) for i_, i in enumerate(inds): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] _t.tic() scores, boxes = im_detect(self.net, im, roidb[i]['boxes']) _t.toc() feat = self.net.blobs[self.layer].data total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum() count += feat.shape[0] print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images, total_norm / count)) return TARGET_NORM * 1.0 / (total_norm / count) def _get_pos_counts(self): counts = np.zeros((len(self.imdb.classes)), dtype=np.int) roidb = self.imdb.roidb for i in xrange(len(roidb)): for j in xrange(1, self.imdb.num_classes): I = np.where(roidb[i]['gt_classes'] == j)[0] counts[j] += len(I) for j in xrange(1, self.imdb.num_classes): print('class {:s} has {:d} positives'. format(self.imdb.classes[j], counts[j])) return counts def get_pos_examples(self): counts = self._get_pos_counts() for i in xrange(len(counts)): self.trainers[i].alloc_pos(counts[i]) _t = Timer() roidb = self.imdb.roidb num_images = len(roidb) # num_images = 100 for i in xrange(num_images): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0] gt_boxes = roidb[i]['boxes'][gt_inds] _t.tic() scores, boxes = im_detect(self.net, im, gt_boxes) _t.toc() feat = self.net.blobs[self.layer].data for j in xrange(1, self.imdb.num_classes): cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0] if len(cls_inds) > 0: cls_feat = feat[cls_inds, :] self.trainers[j].append_pos(cls_feat) print 'get_pos_examples: {:d}/{:d} {:.3f}s' \ .format(i + 1, len(roidb), _t.average_time) def initialize_net(self): # Start all SVM parameters at zero self.net.params['cls_score'][0].data[...] = 0 self.net.params['cls_score'][1].data[...] = 0 # Initialize SVMs in a smart way. Not doing this because its such # a good initialization that we might not learn something close to # the SVM solution. 
# # subtract background weights and biases for the foreground classes # w_bg = self.net.params['cls_score'][0].data[0, :] # b_bg = self.net.params['cls_score'][1].data[0] # self.net.params['cls_score'][0].data[1:, :] -= w_bg # self.net.params['cls_score'][1].data[1:] -= b_bg # # set the background weights and biases to 0 (where they shall remain) # self.net.params['cls_score'][0].data[0, :] = 0 # self.net.params['cls_score'][1].data[0] = 0 def update_net(self, cls_ind, w, b): self.net.params['cls_score'][0].data[cls_ind, :] = w self.net.params['cls_score'][1].data[cls_ind] = b def train_with_hard_negatives(self): _t = Timer() roidb = self.imdb.roidb num_images = len(roidb) # num_images = 100 for i in xrange(num_images): im = cv2.imread(self.imdb.image_path_at(i)) if roidb[i]['flipped']: im = im[:, ::-1, :] _t.tic() scores, boxes = im_detect(self.net, im, roidb[i]['boxes']) _t.toc() feat = self.net.blobs[self.layer].data for j in xrange(1, self.imdb.num_classes): hard_inds = \ np.where((scores[:, j] > self.hard_thresh) & (roidb[i]['gt_overlaps'][:, j].toarray().ravel() < self.neg_iou_thresh))[0] if len(hard_inds) > 0: hard_feat = feat[hard_inds, :].copy() new_w_b = \ self.trainers[j].append_neg_and_retrain(feat=hard_feat) if new_w_b is not None: self.update_net(j, new_w_b[0], new_w_b[1]) print(('train_with_hard_negatives: ' '{:d}/{:d} {:.3f}s').format(i + 1, len(roidb), _t.average_time)) def train(self): # Initialize SVMs using # a. w_i = fc8_w_i - fc8_w_0 # b. b_i = fc8_b_i - fc8_b_0 # c. Install SVMs into net self.initialize_net() # Pass over roidb to count num positives for each class # a. Pre-allocate arrays for positive feature vectors # Pass over roidb, computing features for positives only self.get_pos_examples() # Pass over roidb # a. Compute cls_score with forward pass # b. For each class # i. Select hard negatives # ii. Add them to cache # c. For each class # i. If SVM retrain criteria met, update SVM # ii. Install new SVM into net self.train_with_hard_negatives() # One final SVM retraining for each class # Install SVMs into net for j in xrange(1, self.imdb.num_classes): new_w_b = self.trainers[j].append_neg_and_retrain(force=True) self.update_net(j, new_w_b[0], new_w_b[1]) class SVMClassTrainer(object): """Manages post-hoc SVM training for a single object class.""" def __init__(self, cls, dim, feature_scale=1.0, C=0.001, B=10.0, pos_weight=2.0): self.pos = np.zeros((0, dim), dtype=np.float32) self.neg = np.zeros((0, dim), dtype=np.float32) self.B = B self.C = C self.cls = cls self.pos_weight = pos_weight self.dim = dim self.feature_scale = feature_scale self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1}, intercept_scaling=B, verbose=1, penalty='l2', loss='l1', random_state=cfg.RNG_SEED, dual=True) self.pos_cur = 0 self.num_neg_added = 0 self.retrain_limit = 2000 self.evict_thresh = -1.1 self.loss_history = [] def alloc_pos(self, count): self.pos_cur = 0 self.pos = np.zeros((count, self.dim), dtype=np.float32) def append_pos(self, feat): num = feat.shape[0] self.pos[self.pos_cur:self.pos_cur + num, :] = feat self.pos_cur += num def train(self): print('>>> Updating {} detector <<<'.format(self.cls)) num_pos = self.pos.shape[0] num_neg = self.neg.shape[0] print('Cache holds {} pos examples and {} neg examples'. 
format(num_pos, num_neg)) X = np.vstack((self.pos, self.neg)) * self.feature_scale y = np.hstack((np.ones(num_pos), -np.ones(num_neg))) self.svm.fit(X, y) w = self.svm.coef_ b = self.svm.intercept_[0] scores = self.svm.decision_function(X) pos_scores = scores[:num_pos] neg_scores = scores[num_pos:] pos_loss = (self.C * self.pos_weight * np.maximum(0, 1 - pos_scores).sum()) neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum() reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2 tot_loss = pos_loss + neg_loss + reg_loss self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss)) for i, losses in enumerate(self.loss_history): print((' {:d}: obj val: {:.3f} = {:.3f} ' '(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses)) return ((w * self.feature_scale, b * self.feature_scale), pos_scores, neg_scores) def append_neg_and_retrain(self, feat=None, force=False): if feat is not None: num = feat.shape[0] self.neg = np.vstack((self.neg, feat)) self.num_neg_added += num if self.num_neg_added > self.retrain_limit or force: self.num_neg_added = 0 new_w_b, pos_scores, neg_scores = self.train() # scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1] # easy_inds = np.where(neg_scores < self.evict_thresh)[0] not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0] if len(not_easy_inds) > 0: self.neg = self.neg[not_easy_inds, :] # self.neg = np.delete(self.neg, easy_inds) print(' Pruning easy negatives') print(' Cache holds {} pos examples and {} neg examples'. format(self.pos.shape[0], self.neg.shape[0])) print(' {} pos support vectors'.format((pos_scores <= 1).sum())) print(' {} neg support vectors'.format((neg_scores >= -1).sum())) return new_w_b else: return None def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train SVMs (old skool)') parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]', default=0, type=int) parser.add_argument('--def', dest='prototxt', help='prototxt file defining the network', default=None, type=str) parser.add_argument('--net', dest='caffemodel', help='model to test', default=None, type=str) parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str) parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args if __name__ == '__main__': # Must turn this off to prevent issues when digging into the net blobs to # pull out features (tricky!) cfg.DEDUP_BOXES = 0 # Must turn this on because we use the test im_detect() method to harvest # hard negatives cfg.TEST.SVM = True args = parse_args() print('Called with args:') print(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) print('Using config:') pprint.pprint(cfg) # fix the random seed for reproducibility np.random.seed(cfg.RNG_SEED) # set up caffe caffe.set_mode_gpu() if args.gpu_id is not None: caffe.set_device(args.gpu_id) net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) net.name = os.path.splitext(os.path.basename(args.caffemodel))[0] out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm' out_dir = os.path.dirname(args.caffemodel) imdb = get_imdb(args.imdb_name) print 'Loaded dataset `{:s}` for training'.format(imdb.name) # enhance roidb to contain flipped examples if cfg.TRAIN.USE_FLIPPED: print 'Appending horizontally-flipped training examples...' 
imdb.append_flipped_roidb() print 'done' SVMTrainer(net, imdb).train() filename = '{}/{}.caffemodel'.format(out_dir, out) net.save(filename) print 'Wrote svm model to: {:s}'.format(filename)
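# A small numpy-only sketch of the objective bookkeeping done in SVMClassTrainer.train()
# above: hinge losses for the positive and negative caches plus an L2 term on (w, b).
# The toy scores and the C / pos_weight values are illustrative assumptions, not values
# taken from an actual training run.
import numpy as np

C, pos_weight = 0.001, 2.0
w, b = np.array([0.5, -0.25]), 0.1
pos_scores = np.array([1.2, 0.4, -0.3])   # decision values for positive examples
neg_scores = np.array([-1.5, -0.2, 0.8])  # decision values for negative examples

pos_loss = C * pos_weight * np.maximum(0, 1 - pos_scores).sum()
neg_loss = C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w, w) + 0.5 * b ** 2
print(pos_loss + neg_loss + reg_loss)     # total objective, as logged per retrain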
mit
pytorch/fairseq
tests/test_transformer.py
1
1942
import argparse
import unittest
from typing import Any, Dict, Sequence

import torch
from fairseq.models import transformer

from tests.test_roberta import FakeTask


def mk_sample(tok: Sequence[int] = None, batch_size: int = 2) -> Dict[str, Any]:
    if not tok:
        tok = [10, 11, 12, 13, 14, 15, 2]
    batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size)
    sample = {
        "net_input": {
            "src_tokens": batch,
            "prev_output_tokens": batch,
            "src_lengths": torch.tensor(
                [len(tok)] * batch_size, dtype=torch.long, device=batch.device
            ),
        },
        "target": batch[:, 1:],
    }
    return sample


def mk_transformer(**extra_args: Any):
    overrides = {
        # Use characteristic dimensions
        "encoder_embed_dim": 12,
        "encoder_ffn_embed_dim": 14,
        "decoder_embed_dim": 12,
        "decoder_ffn_embed_dim": 14,
        # Disable dropout so we have comparable tests.
        "dropout": 0,
        "attention_dropout": 0,
        "activation_dropout": 0,
        "encoder_layerdrop": 0,
    }
    overrides.update(extra_args)
    # Overrides the defaults from the parser
    args = argparse.Namespace(**overrides)
    transformer.tiny_architecture(args)

    torch.manual_seed(0)
    task = FakeTask(args)
    return transformer.TransformerModel.build_model(args, task)


class TransformerTestCase(unittest.TestCase):
    def test_forward_backward(self):
        model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=12)
        sample = mk_sample()
        o, _ = model.forward(**sample["net_input"])
        loss = o.sum()
        loss.backward()

    def test_different_encoder_decoder_embed_dim(self):
        model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=16)
        sample = mk_sample()
        o, _ = model.forward(**sample["net_input"])
        loss = o.sum()
        loss.backward()
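# A possible additional case in the same style, checking that the decoder output keeps
# the (batch, tgt_len, vocab) layout returned by TransformerModel.forward; only the
# first two dimensions are asserted so no assumption about FakeTask's vocabulary size
# is needed.
class TransformerShapeTestCase(unittest.TestCase):
    def test_output_shape(self):
        model = mk_transformer()
        sample = mk_sample(batch_size=3)
        o, _ = model.forward(**sample["net_input"])
        self.assertEqual(o.shape[:2], sample["net_input"]["prev_output_tokens"].shape)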
mit
pytorch/fairseq
fairseq/criterions/nat_loss.py
1
6355
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from torch import Tensor from dataclasses import dataclass, field @dataclass class LabelSmoothedDualImitationCriterionConfig(FairseqDataclass): label_smoothing: float = field( default=0.0, metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"}, ) @register_criterion("nat_loss", dataclass=LabelSmoothedDualImitationCriterionConfig) class LabelSmoothedDualImitationCriterion(FairseqCriterion): def __init__(self, task, label_smoothing): super().__init__(task) self.label_smoothing = label_smoothing def _compute_loss( self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0 ): """ outputs: batch x len x d_model targets: batch x len masks: batch x len policy_logprob: if there is some policy depends on the likelihood score as rewards. """ def mean_ds(x: Tensor, dim=None) -> Tensor: return ( x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x) ) if masks is not None: outputs, targets = outputs[masks], targets[masks] if masks is not None and not masks.any(): nll_loss = torch.tensor(0) loss = nll_loss else: logits = F.log_softmax(outputs, dim=-1) if targets.dim() == 1: losses = F.nll_loss(logits, targets.to(logits.device), reduction="none") else: # soft-labels losses = F.kl_div(logits, targets.to(logits.device), reduction="none") losses = losses.sum(-1) nll_loss = mean_ds(losses) if label_smoothing > 0: loss = ( nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing ) else: loss = nll_loss loss = loss * factor return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor} def _custom_loss(self, loss, name="loss", factor=1.0): return {"name": name, "loss": loss, "factor": factor} def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        nsentences, ntokens = sample["nsentences"], sample["ntokens"]

        # B x T
        src_tokens, src_lengths = (
            sample["net_input"]["src_tokens"],
            sample["net_input"]["src_lengths"],
        )
        tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]

        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
        losses, nll_loss = [], []

        for obj in outputs:
            if outputs[obj].get("loss", None) is None:
                _losses = self._compute_loss(
                    outputs[obj].get("out"),
                    outputs[obj].get("tgt"),
                    outputs[obj].get("mask", None),
                    outputs[obj].get("ls", 0.0),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )
            else:
                _losses = self._custom_loss(
                    outputs[obj].get("loss"),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )

            losses += [_losses]
            if outputs[obj].get("nll_loss", False):
                nll_loss += [_losses.get("nll_loss", 0.0)]

        loss = sum(l["loss"] for l in losses)
        nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)

        # NOTE:
        # we don't need to use sample_size as denominator for the gradient
        # here sample_size is just used for logging
        sample_size = 1
        logging_output = {
            "loss": loss.data,
            "nll_loss": nll_loss.data,
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
        }

        for l in losses:
            logging_output[l["name"]] = (
                utils.item(l["loss"].data / l["factor"])
                if reduce
                else l["loss"].data / l["factor"]
            )

        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )
        loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))

        metrics.log_scalar(
            "loss", loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar(
            "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_derived(
            "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
        )

        for key in logging_outputs[0]:
            if key[-5:] == "-loss":
                val = sum(log.get(key, 0) for log in logging_outputs)
                metrics.log_scalar(
                    key[:-5],
                    val / sample_size / math.log(2) if sample_size > 0 else 0.0,
                    sample_size,
                    round=3,
                )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
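# A standalone sketch of the smoothing rule applied in _compute_loss above:
# loss = nll_loss * (1 - eps) - mean(log_softmax(outputs)) * eps. The tensor sizes and
# the epsilon value are illustrative assumptions.
import torch
import torch.nn.functional as F

outputs = torch.randn(6, 10)            # (already masked/flattened) tokens x vocab
targets = torch.randint(0, 10, (6,))
eps = 0.1

logits = F.log_softmax(outputs, dim=-1)
nll_loss = F.nll_loss(logits, targets, reduction="none").float().mean()
loss = nll_loss * (1 - eps) - logits.float().mean() * eps
print(loss, nll_loss)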
mit
neilhan/tensorflow
tensorflow/contrib/learn/python/learn/tests/nonlinear_test.py
9
3837
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-linear estimator tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import tensorflow as tf class NonLinearTest(tf.test.TestCase): """Non-linear estimator tests.""" def setUp(self): random.seed(42) tf.set_random_seed(42) def testIrisDNN(self): iris = tf.contrib.learn.datasets.load_iris() feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)] classifier = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3, config=tf.contrib.learn.RunConfig(tf_random_seed=1)) classifier.fit(iris.data, iris.target, max_steps=200) weights = classifier.weights_ self.assertEqual(weights[0].shape, (4, 10)) self.assertEqual(weights[1].shape, (10, 20)) self.assertEqual(weights[2].shape, (20, 10)) self.assertEqual(weights[3].shape, (10, 3)) biases = classifier.bias_ self.assertEqual(len(biases), 5) def testBostonDNN(self): boston = tf.contrib.learn.datasets.load_boston() feature_columns = [tf.contrib.layers.real_valued_column("", dimension=13)] regressor = tf.contrib.learn.DNNRegressor( feature_columns=feature_columns, hidden_units=[10, 20, 10], config=tf.contrib.learn.RunConfig(tf_random_seed=1)) regressor.fit( boston.data, boston.target, steps=300, batch_size=boston.data.shape[0]) weights = regressor.weights_ self.assertEqual(weights[0].shape, (13, 10)) self.assertEqual(weights[1].shape, (10, 20)) self.assertEqual(weights[2].shape, (20, 10)) self.assertEqual(weights[3].shape, (10, 1)) biases = regressor.bias_ self.assertEqual(len(biases), 5) def testDNNDropout0(self): # Dropout prob == 0. iris = tf.contrib.learn.datasets.load_iris() feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)] classifier = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3, dropout=0.0, config=tf.contrib.learn.RunConfig(tf_random_seed=1)) classifier.fit(iris.data, iris.target, max_steps=200) def testDNNDropout0_1(self): # Dropping only a little. iris = tf.contrib.learn.datasets.load_iris() feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)] classifier = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3, dropout=0.1, config=tf.contrib.learn.RunConfig(tf_random_seed=1)) classifier.fit(iris.data, iris.target, max_steps=200) def testDNNDropout0_9(self): # Dropping out most of it. 
iris = tf.contrib.learn.datasets.load_iris() feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)] classifier = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3, dropout=0.9, config=tf.contrib.learn.RunConfig(tf_random_seed=1)) classifier.fit(iris.data, iris.target, max_steps=200) if __name__ == "__main__": tf.test.main()
apache-2.0
GoogleCloudPlatform/python-docs-samples
data-science-onramp/vertex-ai/modules/trainer/sklearn_model/task.py
1
3367
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_sklearn_task]
# [START aiplatform_sklearn_task_imports]
import argparse
import os
import re

from google.cloud import storage
import joblib
from sklearn.metrics import mean_absolute_error

from trainer import utils
from trainer.sklearn_model import model
# [END aiplatform_sklearn_task_imports]


# [START aiplatform_sklearn_task_args]
def get_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-path", type=str, required=True, help="path to input data"
    )
    parser.add_argument(
        "--degree",
        type=int,
        help="degree of the polynomial regression, default=1 (linear model)",
    )
    parser.add_argument(
        "--alpha",
        type=float,
        help="Regularization strength, default=0 (Standard Regression)",
    )
    parser.add_argument(
        "--model-dir",
        type=str,
        help="Output directory for the model.",
        default=os.getenv("AIP_MODEL_DIR"),
    )
    return parser.parse_args()
# [END aiplatform_sklearn_task_args]


# [START aiplatform_sklearn_task_fit]
def fit_model(
    input_path: str, model_dir: str, degree: int = 1, alpha: float = 0
) -> None:
    """Train, evaluate and save model given model configuration"""
    print(f"Fitting model with degree={degree} and alpha={alpha}")

    # Split datasets into training and testing
    train_feature, eval_feature, train_target, eval_target = utils.load_data(
        input_path)

    # Create sklearn pipeline for a polynomial model defined in model.py
    polynomial_model = model.polynomial_model(degree, alpha)

    # Fit the sklearn model
    print("Fitting model...")
    polynomial_model.fit(train_feature, train_target)

    # Evaluate the model
    print("Evaluating model...")
    pred_target = polynomial_model.predict(eval_feature)
    mae = mean_absolute_error(eval_target, pred_target)
    print(f"Done. Model had MAE={mae}")
    # [END aiplatform_sklearn_task_fit]

    # [START aiplatform_sklearn_task_export]
    # Save model to GCS
    print("Saving model")

    matches = re.match("gs://(.*?)/(.*)", model_dir)
    bucket = matches.group(1)
    blob = matches.group(2)
    model_dump = "model.joblib"

    joblib.dump(polynomial_model, model_dump)
    blob_name = os.path.join(blob, model_dump)

    client = storage.Client()
    client.bucket(bucket).blob(blob_name).upload_from_filename(model_dump)
    print("Model saved")
    # [END aiplatform_sklearn_task_export]


if __name__ == "__main__":
    args = get_args()
    kwargs = {}
    if args.degree:
        kwargs["degree"] = args.degree
    if args.alpha:
        kwargs["alpha"] = args.alpha

    fit_model(args.input_path, args.model_dir, **kwargs)
# [END aiplatform_sklearn_task]
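# A hedged sketch of pulling the exported model back out of Cloud Storage for local
# inspection. "my-bucket" and "models/model.joblib" are placeholders for wherever
# AIP_MODEL_DIR pointed during training, not real locations.
import joblib
from google.cloud import storage

client = storage.Client()
client.bucket("my-bucket").blob("models/model.joblib").download_to_filename("model.joblib")
polynomial_model = joblib.load("model.joblib")
print(polynomial_model)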
apache-2.0
GoogleCloudPlatform/bigquery-utils
udfs/tests/udf_test_utils.py
1
7285
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob from pathlib import Path import json from yaml import load from yaml import SafeLoader # Some javascript libraries have issues with webpack's auto # minifier and therefore must be placed in the set below # to instruct webpack to not minify them. NO_MINIFY_JS_LIBS = { 'js-levenshtein', } def get_dir_to_dataset_mappings(): bq_datasets_yaml_path = Path('./dir_to_dataset_map.yaml') if bq_datasets_yaml_path.is_file(): with open(bq_datasets_yaml_path, 'r') as yaml_file: return load(yaml_file, Loader=SafeLoader) else: return None def get_all_udf_paths(): return glob.glob('./**/*.sql', recursive=True) def get_all_npm_package_config_paths(node_modules_path): """ Get all paths to the package.json files for every npm package specified in the udfs/js_libs/js_libs.yaml file. :param node_modules_path: path to the node_modules directory :return: Set containing paths to package.json files """ js_libs_dict = get_js_libs_from_yaml() js_libs_with_versions = set() npm_package_config_paths = set() for lib_name in js_libs_dict: for version in js_libs_dict.get(lib_name).get('versions'): js_libs_with_versions.add(f'{lib_name}-v{version}') for npm_package_config_path in glob.glob( f'{node_modules_path}/**/package.json'): npm_package_name = Path(npm_package_config_path).parent.name if npm_package_name in js_libs_with_versions: npm_package_config_paths.add(Path(npm_package_config_path)) return npm_package_config_paths def get_js_libs_from_yaml(): """ Get all npm package names from the /udfs/js_libs/js_libs.yaml file. :return: dict representation of the js_libs.yaml file """ js_libs_yaml_path = Path('./js_libs/js_libs.yaml') if js_libs_yaml_path.is_file(): with open(js_libs_yaml_path, 'r') as yaml_file: return load(yaml_file, Loader=SafeLoader) else: return None def generate_js_libs_package_json(): """ This dynamically generates the main package.json which will be used to build all the js libs that are specified in the udfs/js_libs/js_libs.yaml file. 
""" js_libs_dict = get_js_libs_from_yaml() js_libs_package_dict = { "name": "js-bq-libs", "version": "1.0.0", "scripts": { "build-all-libs": "concurrently \"npm:webpack-*\"" }, "dependencies": { f'{lib_name}-v{version}': f'npm:{lib_name}@{version}' for lib_name in js_libs_dict for version in js_libs_dict.get(lib_name).get('versions') }, "devDependencies": { "webpack": "^5.3.1", "webpack-cli": "^4.1.0", "concurrently": "^5.3.0" } } # Update with webpack scripts for building all js packages for lib_name in js_libs_dict: for version in js_libs_dict.get(lib_name).get('versions'): js_libs_package_dict.get('scripts').update({ f'webpack-{lib_name}-v{version}': f'webpack --config {lib_name}-v{version}-webpack.config.js' }) with open('./package.json', 'w') as js_libs_package_json: js_libs_package_json.write(json.dumps(js_libs_package_dict, indent=2)) def generate_webpack_configs(): """ This dynamically generates all the webpack config files needed to build the single-file js libs which are specified in the udfs/js_libs/js_libs.yaml file. See https://webpack.js.org/concepts/configuration/ for more information on webpack config files. """ node_modules_path = Path('./node_modules') npm_package_config_paths = get_all_npm_package_config_paths( node_modules_path) for npm_package_config_path in npm_package_config_paths: with open(npm_package_config_path) as npm_package_config: npm_package_json = json.loads(npm_package_config.read()) # Check for js main entrypoint # https://docs.npmjs.com/cli/v6/configuring-npm/package-json#main # If no main entrypoint found, check for a single dependency file # https://docs.npmjs.com/cli/v6/configuring-npm/package-json#files js_main_entrypoint = npm_package_json.get('main') js_dependency_files = npm_package_json.get('files') js_lib_name = npm_package_json.get('name') js_lib_version = npm_package_json.get('version') if js_main_entrypoint is not None: js_main_entrypoint_path = npm_package_config_path.parent / Path( js_main_entrypoint) elif len(js_dependency_files) == 1: js_main_entrypoint_path = npm_package_config_path.parent / Path( js_dependency_files[0]) webpack_config_file_path = Path( f'{npm_package_config_path.parent.name}-webpack.config.js') minimize_js = True if js_lib_name not in NO_MINIFY_JS_LIBS else False js_lib_file_extension = ".min.js" if minimize_js else ".js" with open(webpack_config_file_path, 'w') as webpack_config: webpack_config.write( f'var path = require("path");\n' f'module.exports = {{\n' f' entry: "./{js_main_entrypoint_path}",\n' f' output: {{\n' f' path: path.resolve(__dirname, "js_builds"),\n' f' filename: "{js_lib_name}-v{js_lib_version}{js_lib_file_extension}",\n' f' library: "{js_lib_name.replace("-", "_")}",\n' f' libraryTarget: "var",\n' f' }},\n' f' optimization: {{\n' f' minimize: {"true" if minimize_js else "false"}\n' f' }},\n' f' mode: "production",\n' f'}};') def main(): parser = argparse.ArgumentParser( description='Utils Class to support testing BigQuery UDFs') parser.add_argument( '--generate-js-libs-package-json', help='Generate package.json file necessary for building ' 'javascript libs for BigQuery UDFs', action='store_true') parser.add_argument( '--generate-webpack-configs', help='Generate webpack config files necessary for building ' 'javascript libs for BigQuery UDFs', action='store_true') args = parser.parse_args() if args.generate_js_libs_package_json: generate_js_libs_package_json() elif args.generate_webpack_configs: generate_webpack_configs() if __name__ == '__main__': main()
apache-2.0
peastman/msmbuilder
msmbuilder/lumping/pcca.py
6
4084
from __future__ import print_function, division, absolute_import import numpy as np from ..msm import MarkovStateModel class PCCA(MarkovStateModel): """Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping) microstates into macrostates. Parameters ---------- n_macrostates : int The desired number of macrostates in the lumped model. kwargs : optional Additional keyword arguments to be passed to MarkovStateModel. See msmbuilder.msm.MarkovStateModel for possible options. Notes ----- PCCA is a subclass of MarkovStateModel. However, the MSM properties and attributes on PCCA refer to the MICROSTATE properties--e.g. pcca.transmat_ is the microstate transition matrix. To get the macrostate transition matrix, you must fit a new MarkovStateModel object on the output (assignments) of PCCA(). """ def __init__(self, n_macrostates, pcca_tolerance=1e-5, **kwargs): self.n_macrostates = n_macrostates self.pcca_tolerance = pcca_tolerance super(PCCA, self).__init__(**kwargs) def fit(self, sequences, y=None): """Fit a PCCA lumping model using a sequence of cluster assignments. Parameters ---------- sequences : list(np.ndarray(dtype='int')) List of arrays of cluster assignments y : None Unused, present for sklearn compatibility only. Returns ------- self """ super(PCCA, self).fit(sequences, y=y) self._do_lumping() return self def _do_lumping(self): """Do the PCCA lumping. Notes ------- 1. Iterate over the eigenvectors, starting with the slowest. 2. Calculate the spread of that eigenvector within each existing macrostate. 3. Pick the macrostate with the largest eigenvector spread. 4. Split the macrostate based on the sign of the eigenvector. """ # Extract non-perron eigenvectors right_eigenvectors = self.right_eigenvectors_[:, 1:] assert self.n_states_ > 0 microstate_mapping = np.zeros(self.n_states_, dtype=int) def spread(x): return x.max() - x.min() for i in range(self.n_macrostates - 1): v = right_eigenvectors[:, i] all_spreads = np.array([spread(v[microstate_mapping == k]) for k in range(i + 1)]) state_to_split = np.argmax(all_spreads) inds = ((microstate_mapping == state_to_split) & (v >= self.pcca_tolerance)) microstate_mapping[inds] = i + 1 self.microstate_mapping_ = microstate_mapping def partial_transform(self, sequence, mode='clip'): trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode) if mode == 'clip': return [self.microstate_mapping_[seq] for seq in trimmed_sequence] elif mode == 'fill': def nan_get(x): try: x = int(x) return self.microstate_mapping_[x] except ValueError: return np.nan return np.asarray([nan_get(x) for x in trimmed_sequence]) else: raise ValueError @classmethod def from_msm(cls, msm, n_macrostates): """Create and fit lumped model from pre-existing MSM. Parameters ---------- msm : MarkovStateModel The input microstate msm to use. n_macrostates : int The number of macrostates Returns ------- lumper : cls The fit PCCA(+) object. """ params = msm.get_params() lumper = cls(n_macrostates, **params) lumper.transmat_ = msm.transmat_ lumper.populations_ = msm.populations_ lumper.mapping_ = msm.mapping_ lumper.countsmat_ = msm.countsmat_ lumper.n_states_ = msm.n_states_ lumper._do_lumping() return lumper
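# A toy numpy walk-through of one split step from _do_lumping above: pick the
# macrostate whose eigenvector values are most spread out, then split it where the
# (tolerance-shifted) eigenvector is non-negative. The vectors below are made-up
# illustrations, not output from a fitted MSM.
import numpy as np

v = np.array([-0.9, -0.7, 0.8, 0.6, 0.05])   # one non-Perron right eigenvector
microstate_mapping = np.zeros(5, dtype=int)  # everything starts in macrostate 0


def spread(x):
    return x.max() - x.min()


all_spreads = np.array([spread(v[microstate_mapping == k]) for k in range(1)])
state_to_split = np.argmax(all_spreads)      # macrostate 0, the only one so far
inds = (microstate_mapping == state_to_split) & (v >= 1e-5)
microstate_mapping[inds] = 1
print(microstate_mapping)                    # [0 0 1 1 1]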
lgpl-2.1
cxhernandez/msmbuilder
msmbuilder/lumping/pcca.py
6
4084
from __future__ import print_function, division, absolute_import import numpy as np from ..msm import MarkovStateModel class PCCA(MarkovStateModel): """Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping) microstates into macrostates. Parameters ---------- n_macrostates : int The desired number of macrostates in the lumped model. kwargs : optional Additional keyword arguments to be passed to MarkovStateModel. See msmbuilder.msm.MarkovStateModel for possible options. Notes ----- PCCA is a subclass of MarkovStateModel. However, the MSM properties and attributes on PCCA refer to the MICROSTATE properties--e.g. pcca.transmat_ is the microstate transition matrix. To get the macrostate transition matrix, you must fit a new MarkovStateModel object on the output (assignments) of PCCA(). """ def __init__(self, n_macrostates, pcca_tolerance=1e-5, **kwargs): self.n_macrostates = n_macrostates self.pcca_tolerance = pcca_tolerance super(PCCA, self).__init__(**kwargs) def fit(self, sequences, y=None): """Fit a PCCA lumping model using a sequence of cluster assignments. Parameters ---------- sequences : list(np.ndarray(dtype='int')) List of arrays of cluster assignments y : None Unused, present for sklearn compatibility only. Returns ------- self """ super(PCCA, self).fit(sequences, y=y) self._do_lumping() return self def _do_lumping(self): """Do the PCCA lumping. Notes ------- 1. Iterate over the eigenvectors, starting with the slowest. 2. Calculate the spread of that eigenvector within each existing macrostate. 3. Pick the macrostate with the largest eigenvector spread. 4. Split the macrostate based on the sign of the eigenvector. """ # Extract non-perron eigenvectors right_eigenvectors = self.right_eigenvectors_[:, 1:] assert self.n_states_ > 0 microstate_mapping = np.zeros(self.n_states_, dtype=int) def spread(x): return x.max() - x.min() for i in range(self.n_macrostates - 1): v = right_eigenvectors[:, i] all_spreads = np.array([spread(v[microstate_mapping == k]) for k in range(i + 1)]) state_to_split = np.argmax(all_spreads) inds = ((microstate_mapping == state_to_split) & (v >= self.pcca_tolerance)) microstate_mapping[inds] = i + 1 self.microstate_mapping_ = microstate_mapping def partial_transform(self, sequence, mode='clip'): trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode) if mode == 'clip': return [self.microstate_mapping_[seq] for seq in trimmed_sequence] elif mode == 'fill': def nan_get(x): try: x = int(x) return self.microstate_mapping_[x] except ValueError: return np.nan return np.asarray([nan_get(x) for x in trimmed_sequence]) else: raise ValueError @classmethod def from_msm(cls, msm, n_macrostates): """Create and fit lumped model from pre-existing MSM. Parameters ---------- msm : MarkovStateModel The input microstate msm to use. n_macrostates : int The number of macrostates Returns ------- lumper : cls The fit PCCA(+) object. """ params = msm.get_params() lumper = cls(n_macrostates, **params) lumper.transmat_ = msm.transmat_ lumper.populations_ = msm.populations_ lumper.mapping_ = msm.mapping_ lumper.countsmat_ = msm.countsmat_ lumper.n_states_ = msm.n_states_ lumper._do_lumping() return lumper
lgpl-2.1
anntzer/scikit-learn
benchmarks/bench_hist_gradient_boosting_categorical_only.py
12
2623
import argparse from time import time from sklearn.preprocessing import KBinsDiscretizer from sklearn.datasets import make_classification from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator parser = argparse.ArgumentParser() parser.add_argument("--n-leaf-nodes", type=int, default=31) parser.add_argument("--n-trees", type=int, default=100) parser.add_argument("--n-features", type=int, default=20) parser.add_argument("--n-cats", type=int, default=20) parser.add_argument("--n-samples", type=int, default=10_000) parser.add_argument("--lightgbm", action="store_true", default=False) parser.add_argument("--learning-rate", type=float, default=0.1) parser.add_argument("--max-bins", type=int, default=255) parser.add_argument("--no-predict", action="store_true", default=False) parser.add_argument("--verbose", action="store_true", default=False) args = parser.parse_args() n_leaf_nodes = args.n_leaf_nodes n_features = args.n_features n_categories = args.n_cats n_samples = args.n_samples n_trees = args.n_trees lr = args.learning_rate max_bins = args.max_bins verbose = args.verbose def fit(est, data_train, target_train, libname, **fit_params): print(f"Fitting a {libname} model...") tic = time() est.fit(data_train, target_train, **fit_params) toc = time() print(f"fitted in {toc - tic:.3f}s") def predict(est, data_test): # We don't report accuracy or ROC because the dataset doesn't really make # sense: we treat ordered features as un-ordered categories. if args.no_predict: return tic = time() est.predict(data_test) toc = time() print(f"predicted in {toc - tic:.3f}s") X, y = make_classification(n_samples=n_samples, n_features=n_features, random_state=0) X = KBinsDiscretizer(n_bins=n_categories, encode="ordinal").fit_transform(X) print(f"Number of features: {n_features}") print(f"Number of samples: {n_samples}") is_categorical = [True] * n_features est = HistGradientBoostingClassifier( loss="log_loss", learning_rate=lr, max_iter=n_trees, max_bins=max_bins, max_leaf_nodes=n_leaf_nodes, categorical_features=is_categorical, early_stopping=False, random_state=0, verbose=verbose, ) fit(est, X, y, "sklearn") predict(est, X) if args.lightgbm: est = get_equivalent_estimator(est, lib="lightgbm", n_classes=2) est.set_params(max_cat_to_onehot=1) # dont use OHE categorical_features = list(range(n_features)) fit(est, X, y, "lightgbm", categorical_feature=categorical_features) predict(est, X)
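# Example invocations of this benchmark; the flag names come from the parser above and
# the script name matches this file's path in the repository:
#
#   python bench_hist_gradient_boosting_categorical_only.py --n-samples 100000 --verbose
#   python bench_hist_gradient_boosting_categorical_only.py --lightgbm --no-predict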
bsd-3-clause
coderbone/SickRage
lib/guessit/__main__.py
29
12284
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # Copyright (c) 2013 Rémi Alvergnat <toilal.dev@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function, unicode_literals from collections import defaultdict import logging import os from guessit import PY2, u, guess_file_info from guessit.options import get_opts from guessit.__version__ import __version__ def guess_file(filename, info='filename', options=None, **kwargs): options = options or {} filename = u(filename) if not options.get('yaml') and not options.get('show_property'): print('For:', filename) guess = guess_file_info(filename, info, options, **kwargs) if not options.get('unidentified'): try: del guess['unidentified'] except KeyError: pass if options.get('show_property'): print(guess.get(options.get('show_property'), '')) return if options.get('yaml'): import yaml for k, v in guess.items(): if isinstance(v, list) and len(v) == 1: guess[k] = v[0] ystr = yaml.safe_dump({filename: dict(guess)}, default_flow_style=False, allow_unicode=True) i = 0 for yline in ystr.splitlines(): if i == 0: print("? 
" + yline[:-1]) elif i == 1: print(":" + yline[1:]) else: print(yline) i += 1 return print('GuessIt found:', guess.nice_string(options.get('advanced'))) def _supported_properties(): all_properties = defaultdict(list) transformers_properties = [] from guessit.plugins import transformers for transformer in transformers.all_transformers(): supported_properties = transformer.supported_properties() transformers_properties.append((transformer, supported_properties)) if isinstance(supported_properties, dict): for property_name, possible_values in supported_properties.items(): all_properties[property_name].extend(possible_values) else: for property_name in supported_properties: all_properties[property_name] # just make sure it exists return all_properties, transformers_properties def display_transformers(): print('GuessIt transformers:') _, transformers_properties = _supported_properties() for transformer, _ in transformers_properties: print('[@] %s (%s)' % (transformer.name, transformer.priority)) def display_properties(options): values = options.values transformers = options.transformers name_only = options.name_only print('GuessIt properties:') all_properties, transformers_properties = _supported_properties() if name_only: # the 'container' property does not apply when using the --name-only # option del all_properties['container'] if transformers: for transformer, properties_list in transformers_properties: print('[@] %s (%s)' % (transformer.name, transformer.priority)) for property_name in properties_list: property_values = all_properties.get(property_name) print(' [+] %s' % (property_name,)) if property_values and values: _display_property_values(property_name, indent=4) else: properties_list = sorted(all_properties.keys()) for property_name in properties_list: property_values = all_properties.get(property_name) print(' [+] %s' % (property_name,)) if property_values and values: _display_property_values(property_name, indent=4) def _display_property_values(property_name, indent=2): all_properties, _ = _supported_properties() property_values = all_properties.get(property_name) for property_value in property_values: print(indent * ' ' + '[!] 
%s' % (property_value,)) def run_demo(episodes=True, movies=True, options=None): # NOTE: tests should not be added here but rather in the tests/ folder # this is just intended as a quick example if episodes: testeps = ['Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi', 'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi', 'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi', 'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi', 'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi', 'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg', 'Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi', 'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi', 'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi'] for f in testeps: print('-' * 80) guess_file(f, options=options, type='episode') if movies: testmovies = ['Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv', 'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi', 'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi', 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv', 'Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv', 'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', '[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', 'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi', 'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt', 'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv', 'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv', 'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi', 'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi', 'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi', 'Movies/Juno (2007)/Juno KLAXXON.avi', 'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv', 'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt', 'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi', 'testsmewt_bugs/movies/Baraka_Edition_Collector.avi' ] for f in testmovies: print('-' * 80) guess_file(f, options=options, type='movie') def submit_bug(filename, options): import requests # only import when needed from requests.exceptions import RequestException try: opts = dict((k, v) for k, v in options.__dict__.items() if v and k != 'submit_bug') r = requests.post('http://guessit.io/bugs', {'filename': filename, 'version': __version__, 'options': str(opts)}) if r.status_code == 200: print('Successfully submitted file: %s' % r.text) else: print('Could not submit bug at the moment, please try again later: %s %s' % (r.status_code, r.reason)) except RequestException as e: print('Could not submit bug at the moment, please try again later: %s' % e) def main(args=None, setup_logging=True): if setup_logging: from guessit import slogging slogging.setup_logging() if PY2: # 
pragma: no cover import codecs import locale import sys # see http://bugs.python.org/issue2128 if os.name == 'nt': for i, a in enumerate(sys.argv): sys.argv[i] = a.decode(locale.getpreferredencoding()) # see https://github.com/wackou/guessit/issues/43 # and http://stackoverflow.com/questions/4545661/unicodedecodeerror-when-redirecting-to-file # Wrap sys.stdout into a StreamWriter to allow writing unicode. sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout) # Needed for guessit.plugins.transformers.reload() to be called. from guessit.plugins import transformers if args: options = get_opts().parse_args(args) else: # pragma: no cover options = get_opts().parse_args() if options.verbose: logging.getLogger().setLevel(logging.DEBUG) help_required = True if options.properties or options.values: display_properties(options) help_required = False elif options.transformers: display_transformers() help_required = False if options.demo: run_demo(episodes=True, movies=True, options=vars(options)) help_required = False if options.version: print('+-------------------------------------------------------+') print('+ GuessIt ' + __version__ + (28-len(__version__)) * ' ' + '+') print('+-------------------------------------------------------+') print('| Please report any bug or feature request at |') print('| https://github.com/wackou/guessit/issues. |') print('+-------------------------------------------------------+') help_required = False if options.yaml: try: import yaml, babelfish def default_representer(dumper, data): return dumper.represent_str(str(data)) yaml.SafeDumper.add_representer(babelfish.Language, default_representer) yaml.SafeDumper.add_representer(babelfish.Country, default_representer) except ImportError: # pragma: no cover print('PyYAML not found. Using default output.') filenames = [] if options.filename: filenames.extend(options.filename) if options.input_file: input_file = open(options.input_file, 'r') try: filenames.extend([line.strip() for line in input_file.readlines()]) finally: input_file.close() filenames = filter(lambda f: f, filenames) if filenames: if options.submit_bug: for filename in filenames: help_required = False submit_bug(filename, options) else: for filename in filenames: help_required = False guess_file(filename, info=options.info.split(','), options=vars(options)) if help_required: # pragma: no cover get_opts().print_help() if __name__ == '__main__': main()
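# A small library-level sketch of the same call path the CLI above drives; the filename
# is a made-up example and the detected properties depend on the transformers shipped
# with this (0.x-era) version of guessit.
from guessit import guess_file_info

guess = guess_file_info('The.Prestige.2006.720p.BluRay.x264.mkv', info='filename')
print(guess.nice_string())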
gpl-3.0
ECP-CANDLE/Benchmarks
Pilot3/P3B1/p3b1_baseline_keras2.py
1
9548
from __future__ import print_function import candle import numpy as np import p3b1 as bmk from sklearn.metrics import f1_score from tensorflow.keras import backend as K from tensorflow.keras.layers import Dense, Dropout, Input from tensorflow.keras.models import Model def initialize_parameters(default_model="p3b1_default_model.txt"): # Build benchmark object p3b1Bmk = bmk.BenchmarkP3B1( bmk.file_path, default_model, "keras", prog="p3b1_baseline", desc="Multi-task (DNN) for data extraction \ from clinical reports - Pilot 3 Benchmark 1", ) # Initialize parameters gParameters = candle.finalize_parameters(p3b1Bmk) # bmk.logger.info('Params: {}'.format(gParameters)) return gParameters def fetch_data(gParameters): """Downloads and decompresses the data if not locally available. Since the training data depends on the model definition it is not loaded, instead the local path where the raw data resides is returned """ path = gParameters["data_url"] fpath = candle.fetch_file(path + gParameters["train_data"], "Pilot3", unpack=True) return fpath def build_model( gParameters, kerasDefaults, shared_nnet_spec, individual_nnet_spec, input_dim, Y_train, Y_test, verbose=False, ): labels_train = [] labels_test = [] n_out_nodes = [] for idx in range(len(Y_train)): truth_train = np.array(Y_train[idx], dtype="int32") truth_test = np.array(Y_test[idx], dtype="int32") mv = int(np.max(truth_train)) label_train = np.zeros((len(truth_train), mv + 1)) for i in range(len(truth_train)): label_train[i, truth_train[i]] = 1 label_test = np.zeros((len(truth_test), mv + 1)) for i in range(len(truth_test)): label_test[i, truth_test[i]] = 1 labels_train.append(label_train) labels_test.append(label_test) n_out_nodes.append(mv + 1) shared_layers = [] # input layer layer = Input(shape=(input_dim,), name="input") shared_layers.append(layer) # shared layers for k in range(len(shared_nnet_spec)): layer = Dense( shared_nnet_spec[k], activation=gParameters["activation"], name="shared_layer_" + str(k), )(shared_layers[-1]) shared_layers.append(layer) if gParameters["dropout"] > 0: layer = Dropout(gParameters["dropout"])(shared_layers[-1]) shared_layers.append(layer) # individual layers indiv_layers_arr = [] models = [] trainable_count = 0 non_trainable_count = 0 for idx in range(len(individual_nnet_spec)): indiv_layers = [shared_layers[-1]] for k in range(len(individual_nnet_spec[idx]) + 1): if k < len(individual_nnet_spec[idx]): layer = Dense( individual_nnet_spec[idx][k], activation=gParameters["activation"], name="indiv_layer_" + str(idx) + "_" + str(k), )(indiv_layers[-1]) indiv_layers.append(layer) if gParameters["dropout"] > 0: layer = Dropout(gParameters["dropout"])(indiv_layers[-1]) indiv_layers.append(layer) else: layer = Dense( n_out_nodes[idx], activation=gParameters["out_activation"], name="out_" + str(idx), )(indiv_layers[-1]) indiv_layers.append(layer) indiv_layers_arr.append(indiv_layers) model = Model(inputs=[shared_layers[0]], outputs=[indiv_layers[-1]]) # calculate trainable/non-trainable param count for each model param_counts = candle.compute_trainable_params(model) trainable_count += param_counts["trainable_params"] non_trainable_count += param_counts["non_trainable_params"] models.append(model) # capture total param counts gParameters["trainable_params"] = trainable_count gParameters["non_trainable_params"] = non_trainable_count gParameters["total_params"] = trainable_count + non_trainable_count # Define optimizer optimizer = candle.build_optimizer( gParameters["optimizer"], gParameters["learning_rate"], 
kerasDefaults ) # DEBUG - verify if verbose: for k in range(len(models)): model = models[k] print("Model: ", k) model.summary() for k in range(len(models)): model = models[k] model.compile( loss=gParameters["loss"], optimizer=optimizer, metrics=[gParameters["metrics"]], ) return models, labels_train, labels_test def train_model( gParameters, models, X_train, Y_train, X_test, Y_test, fold, verbose=False ): base_run_id = gParameters["run_id"] for epoch in range(gParameters["epochs"]): for k in range(len(models)): model = models[k] gParameters["run_id"] = base_run_id + ".{}.{}.{}".format(fold, epoch, k) candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters) timeoutMonitor = candle.TerminateOnTimeOut(gParameters["timeout"]) model.fit( {"input": X_train[k]}, {"out_" + str(k): Y_train[k]}, epochs=1, verbose=verbose, callbacks=[candleRemoteMonitor, timeoutMonitor], batch_size=gParameters["batch_size"], validation_data=(X_test[k], Y_test[k]), ) return models def evaluate_model(X_test, truths_test, labels_test, models): # retrieve truth-pred pair avg_loss = 0.0 ret = [] for k in range(len(models)): ret_k = [] feature_test = X_test[k] truth_test = truths_test[k] label_test = labels_test[k] model = models[k] loss = model.evaluate(feature_test, label_test) avg_loss = avg_loss + loss[0] print("In EVALUATE loss: ", loss) pred = model.predict(feature_test) ret_k.append(truth_test) ret_k.append(np.argmax(pred, axis=1)) ret.append(ret_k) avg_loss = avg_loss / float(len(models)) ret.append(avg_loss) return ret def run(gParameters): fpath = fetch_data(gParameters) # Get default parameters for initialization and optimizer functions kerasDefaults = candle.keras_default_config() # Construct structures common to all folds # shared_nnet_spec = [] # elem = gParameters['shared_nnet_spec'].split(',') # for el in elem: # shared_nnet_spec.append(int(el)) # individual_nnet_spec = [] # indiv = gParameters['ind_nnet_spec'].split(':') # for ind in indiv: # indiv_nnet_spec = [] # elem = ind.split(',') # for el in elem: # indiv_nnet_spec.append(int(el)) # individual_nnet_spec.append(indiv_nnet_spec) shared_nnet_spec = gParameters["shared_nnet_spec"] individual_nnet_spec = gParameters["ind_nnet_spec"] # Construct features common to all folds features = [] feat = gParameters["feature_names"].split(":") for f in feat: features.append(f) n_feat = len(feat) print("Feature names:") for i in range(n_feat): print(features[i]) # initialize arrays for all the features truth_array = [[] for _ in range(n_feat)] pred_array = [[] for _ in range(n_feat)] avg_loss = 0.0 # stdout display level verbose = True # per fold for fold in range(gParameters["n_fold"]): # build data X_train, Y_train, X_test, Y_test = bmk.build_data( len(individual_nnet_spec), fold, fpath ) # build model input_dim = len(X_train[0][0]) models, labels_train, labels_test = build_model( gParameters, kerasDefaults, shared_nnet_spec, individual_nnet_spec, input_dim, Y_train, Y_test, verbose, ) # train model models = train_model( gParameters, models, X_train, labels_train, X_test, labels_test, fold, verbose, ) # evaluate model ret = evaluate_model(X_test, Y_test, labels_test, models) for i in range(n_feat): truth_array[i].extend(ret[i][0]) pred_array[i].extend(ret[i][1]) avg_loss += ret[-1] avg_loss /= float(gParameters["n_fold"]) for task in range(n_feat): print( "Task", task + 1, ":", features[task], "- Macro F1 score", f1_score(truth_array[task], pred_array[task], average="macro"), ) print( "Task", task + 1, ":", features[task], "- Micro F1 score", 
f1_score(truth_array[task], pred_array[task], average="micro"), ) return avg_loss def main(): gParameters = initialize_parameters() avg_loss = run(gParameters) print("Average loss: ", avg_loss) if __name__ == "__main__": main() try: K.clear_session() except AttributeError: # theano does not have this function pass
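# --- Usage sketch (added for illustration; not part of the original benchmark) ---
# The benchmark is normally driven through main(); this shows the same flow with
# the epoch/fold counts shortened for a quick smoke test. Overriding the
# 'epochs' and 'n_fold' keys after initialize_parameters() is an assumption.
def _run_smoke_test():
    params = initialize_parameters()
    params["epochs"] = 1
    params["n_fold"] = 1
    return run(params)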
mit
google/jax
jax/experimental/jax2tf/examples/saved_model_main.py
1
7779
# Copyright 2020 The JAX Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Demonstrates training models and saving the result as a SavedModel. By default, uses a pure JAX implementation of MNIST. There are flags to choose a Flax CNN version of MNIST, or to skip the training and just test a previously saved SavedModel. It is possible to save a batch-polymorphic version of the model, or a model prepared for specific batch sizes. Try --help to see all flags. This file is used both as an executable, and as a library in two other examples. See discussion in README.md. """ import logging import os from absl import app from absl import flags from jax.experimental.jax2tf.examples import mnist_lib # type: ignore from jax.experimental.jax2tf.examples import saved_model_lib # type: ignore import numpy as np import tensorflow as tf # type: ignore import tensorflow_datasets as tfds # type: ignore flags.DEFINE_enum("model", "mnist_flax", ["mnist_flax", "mnist_pure_jax"], "Which model to use.") flags.DEFINE_boolean("model_classifier_layer", True, ("The model should include the classifier layer, or just " "the last layer of logits. Set this to False when you " "want to reuse the classifier-less model in a larger " "model. See keras_reuse_main.py and README.md.")) flags.DEFINE_string("model_path", "/tmp/jax2tf/saved_models", "Path under which to save the SavedModel.") flags.DEFINE_integer("model_version", 1, ("The version number for the SavedModel. Needed for " "serving, larger versions will take precedence"), lower_bound=1) flags.DEFINE_integer("serving_batch_size", 1, "For what batch size to prepare the serving signature. " "Use -1 for converting and saving with batch polymorphism.") flags.register_validator( "serving_batch_size", lambda serving_batch_size: serving_batch_size > 0 or serving_batch_size == -1, message="--serving_batch_size must be either -1 or a positive integer.") flags.DEFINE_integer("num_epochs", 3, "For how many epochs to train.", lower_bound=1) flags.DEFINE_boolean( "generate_model", True, "Train and save a new model. Otherwise, use an existing SavedModel.") flags.DEFINE_boolean( "compile_model", True, "Enable TensorFlow jit_compiler for the SavedModel. This is " "necessary if you want to use the model for TensorFlow serving.") flags.DEFINE_boolean("show_model", True, "Show details of saved SavedModel.") flags.DEFINE_boolean( "show_images", False, "Plot some sample images with labels and inference results.") flags.DEFINE_boolean( "test_savedmodel", True, "Test TensorFlow inference using the SavedModel w.r.t. 
the JAX model.") FLAGS = flags.FLAGS def train_and_save(): logging.info("Loading the MNIST TensorFlow dataset") train_ds = mnist_lib.load_mnist( tfds.Split.TRAIN, batch_size=mnist_lib.train_batch_size) test_ds = mnist_lib.load_mnist( tfds.Split.TEST, batch_size=mnist_lib.test_batch_size) if FLAGS.show_images: mnist_lib.plot_images(train_ds, 1, 5, "Training images", inference_fn=None) the_model_class = pick_model_class() model_dir = savedmodel_dir(with_version=True) if FLAGS.generate_model: model_descr = model_description() logging.info("Generating model for %s", model_descr) (predict_fn, predict_params) = the_model_class.train( train_ds, test_ds, FLAGS.num_epochs, with_classifier=FLAGS.model_classifier_layer) if FLAGS.serving_batch_size == -1: # Batch-polymorphic SavedModel input_signatures = [ tf.TensorSpec((None,) + mnist_lib.input_shape, tf.float32), ] polymorphic_shapes = "(batch, ...)" else: input_signatures = [ # The first one will be the serving signature tf.TensorSpec((FLAGS.serving_batch_size,) + mnist_lib.input_shape, tf.float32), tf.TensorSpec((mnist_lib.train_batch_size,) + mnist_lib.input_shape, tf.float32), tf.TensorSpec((mnist_lib.test_batch_size,) + mnist_lib.input_shape, tf.float32), ] polymorphic_shapes = None logging.info("Saving model for %s", model_descr) saved_model_lib.convert_and_save_model( predict_fn, predict_params, model_dir, with_gradient=True, input_signatures=input_signatures, polymorphic_shapes=polymorphic_shapes, compile_model=FLAGS.compile_model) if FLAGS.test_savedmodel: tf_accelerator, tolerances = tf_accelerator_and_tolerances() with tf.device(tf_accelerator): logging.info("Testing savedmodel") pure_restored_model = tf.saved_model.load(model_dir) if FLAGS.show_images and FLAGS.model_classifier_layer: mnist_lib.plot_images( test_ds, 1, 5, f"Inference results for {model_descr}", inference_fn=pure_restored_model) test_input = np.ones( (mnist_lib.test_batch_size,) + mnist_lib.input_shape, dtype=np.float32) np.testing.assert_allclose( pure_restored_model(tf.convert_to_tensor(test_input)), predict_fn(predict_params, test_input), **tolerances) if FLAGS.show_model: def print_model(model_dir: str): cmd = f"saved_model_cli show --all --dir {model_dir}" print(cmd) os.system(cmd) print_model(model_dir) def pick_model_class(): """Picks one of PureJaxMNIST or FlaxMNIST.""" if FLAGS.model == "mnist_pure_jax": return mnist_lib.PureJaxMNIST elif FLAGS.model == "mnist_flax": return mnist_lib.FlaxMNIST else: raise ValueError(f"Unrecognized model: {FLAGS.model}") def model_description() -> str: """A short description of the picked model.""" res = pick_model_class().name if not FLAGS.model_classifier_layer: res += " (features_only)" return res def savedmodel_dir(with_version: bool = True) -> str: """The directory where we save the SavedModel.""" model_dir = os.path.join( FLAGS.model_path, FLAGS.model + ('' if FLAGS.model_classifier_layer else '_features') ) if with_version: model_dir = os.path.join(model_dir, str(FLAGS.model_version)) return model_dir def tf_accelerator_and_tolerances(): """Picks the TF accelerator to use and the tolerances for numerical checks.""" tf_accelerator = (tf.config.list_logical_devices("TPU") + tf.config.list_logical_devices("GPU") + tf.config.list_logical_devices("CPU"))[0] logging.info("Using tf_accelerator = %s", tf_accelerator) if tf_accelerator.device_type == "TPU": tolerances = dict(atol=1e-6, rtol=1e-6) elif tf_accelerator.device_type == "GPU": tolerances = dict(atol=1e-6, rtol=1e-4) elif tf_accelerator.device_type == "CPU": tolerances = 
dict(atol=1e-5, rtol=1e-5) logging.info("Using tolerances %s", tolerances) return tf_accelerator, tolerances if __name__ == "__main__": app.run(lambda _: train_and_save())
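# --- Usage sketch (added for illustration; not part of the original example) ---
# After train_and_save() has written the SavedModel, it can be restored and
# called directly, as in the accuracy check above. Batch size 1 matches the
# default --serving_batch_size; with -1 the saved model is batch-polymorphic.
def load_and_run_saved_model():
    restored = tf.saved_model.load(savedmodel_dir(with_version=True))
    batch = np.zeros((1,) + mnist_lib.input_shape, dtype=np.float32)
    return restored(tf.convert_to_tensor(batch))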
apache-2.0
BestSonny/examples
word_language_model/data.py
9
1439
import os
import torch


class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = []

    def add_word(self, word):
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]

    def __len__(self):
        return len(self.idx2word)


class Corpus(object):
    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Add words to the dictionary
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)

        # Tokenize file content
        with open(path, 'r') as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1

        return ids
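# --- Usage sketch (added for illustration; not part of the original file) ---
# Assumes a directory containing train.txt, valid.txt and test.txt, e.g. the
# Wikitext-2 layout used by the word_language_model example; the path below is
# a placeholder.
def _load_corpus_example(path='./data/wikitext-2'):
    corpus = Corpus(path)
    print('vocabulary size:', len(corpus.dictionary))
    print('training tokens:', corpus.train.size(0))
    return corpus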
bsd-3-clause
ECP-CANDLE/Benchmarks
common/darts/architecture.py
1
6846
import darts.functional as F import torch from torch import autograd, optim class Hyperparameters: alpha_lr = 3e-4 alpha_wd = 1e-3 class Architecture: def __init__(self, model, args, hyperparams=Hyperparameters(), device="cpu"): self.momentum = args.momentum # momentum for optimizer of theta self.wd = args.weight_decay # weight decay for optimizer of model's theta self.model = model # main model with respect to theta and alpha self.device = device # this is the optimizer to optimize alpha parameter self.optimizer = optim.Adam( self.model.arch_parameters(), lr=hyperparams.alpha_lr, betas=(0.5, 0.999), weight_decay=hyperparams.alpha_wd, ) def comp_unrolled_model(self, data, target, eta, optimizer): """Loss on train set and then update w_pi, not-in-place Parameters ---------- data : torch.tensor target : torch.tensor eta : float optimizer : torch.optim.optimizer optimizer of theta, not optimizer of alpha Returns ------- model_unrolled """ # forward to get loss loss = self.model.loss(data, target) # flatten current weights theta = F.flatten(self.model.parameters()).detach() try: # fetch momentum data from theta optimizer moment = F.flatten( optimizer.state[v]["momentum_buffer"] for v in self.model.parameters() ) moment.mul_(self.momentum) except Exception: moment = torch.zeros_like(theta) # flatten all gradients dtheta = F.flatten(autograd.grad(loss, self.model.parameters())).data # indeed, here we implement a simple SGD with momentum and weight decay # theta = theta - eta * (moment + weight decay + dtheta) theta = theta.sub(eta, moment + dtheta + self.wd * theta) # construct a new model unrolled_model = self.construct_model_from_theta(theta) return unrolled_model.to(self.device) def step( self, x_train, target_train, x_valid, target_valid, eta, optimizer, unrolled ): """ update alpha parameter by manually computing the gradients :param x_train: :param target_train: :param x_valid: :param target_valid: :param eta: :param optimizer: theta optimizer :param unrolled: :return: """ # alpha optimizer self.optimizer.zero_grad() # compute the gradient and write it into tensor.grad # instead of generated by loss.backward() if unrolled: self.backward_step_unrolled( x_train, target_train, x_valid, target_valid, eta, optimizer ) else: # directly optimize alpha on w, instead of w_pi self.backward_step(x_valid, target_valid) self.optimizer.step() def backward_step(self, x_valid, target_valid): """ simply train on validate set and backward :param x_valid: :param target_valid: :return: """ _, loss = self.model.loss(x_valid, target_valid, reduce="mean") # both alpha and theta require grad but only alpha optimizer will # step in current phase. loss.backward() def backward_step_unrolled( self, x_train, target_train, x_valid, target_valid, eta, optimizer ): """ train on validate set based on update w_pi :param x_train: :param target_train: :param x_valid: :param target_valid: :param eta: 0.01, according to author's comments :param optimizer: theta optimizer :return: """ # theta_pi = theta - lr * grad unrolled_model = self.comp_unrolled_model(x_train, target_train, eta, optimizer) # calculate loss on theta_pi unrolled_loss = unrolled_model.loss(x_valid, target_valid) # this will update theta_pi model, but NOT theta model unrolled_loss.backward() # grad(L(w', a), a), part of Eq. 
6 dalpha = [v.grad for v in unrolled_model.arch_parameters()] vector = [v.grad.data for v in unrolled_model.parameters()] implicit_grads = self.hessian_vector_product(vector, x_train, target_train) for g, ig in zip(dalpha, implicit_grads): # g = g - eta * ig, from Eq. 6 g.data.sub_(eta, ig.data) # write updated alpha into original model for v, g in zip(self.model.arch_parameters(), dalpha): if v.grad is None: v.grad = g.data else: v.grad.data.copy_(g.data) def construct_model_from_theta(self, theta): """ construct a new model with initialized weight from theta it use .state_dict() and load_state_dict() instead of .parameters() + fill_() :param theta: flatten weights, need to reshape to original shape :return: """ model = self.model.new() state_dict = self.model.state_dict() params, offset = {}, 0 for k, v in self.model.named_parameters(): v_length = v.numel() # restore theta[] value to original shape params[k] = theta[offset : offset + v_length].view(v.size()) offset += v_length assert offset == len(theta) state_dict.update(params) model.load_state_dict(state_dict) model.to(self.device) return model def hessian_vector_product(self, vector, data, target, r=1e-2): """ slightly touch vector value to estimate the gradient with respect to alpha refer to Eq. 7 for more details. :param vector: gradient.data of parameters theta :param x: :param target: :param r: :return: """ R = r / F.flatten(vector).norm() for p, v in zip(self.model.parameters(), vector): # w+ = w + R * v p.data.add_(R, v) loss = self.model.loss(data, target) # gradient with respect to alpha grads_p = autograd.grad(loss, self.model.arch_parameters()) for p, v in zip(self.model.parameters(), vector): # w- = (w+R*v) - 2R*v p.data.sub_(2 * R, v) loss = self.model.loss(data, target) grads_n = autograd.grad(loss, self.model.arch_parameters()) for p, v in zip(self.model.parameters(), vector): # w = (w+R*v) - 2R*v + R*v p.data.add_(R, v) h = [(x - y).div_(2 * R) for x, y in zip(grads_p, grads_n)] # h len: 2 h0 torch.Size([14, 8]) # print('h len:', len(h), 'h0', h[0].shape) return h
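# --- Usage sketch (added for illustration; not part of the original module) ---
# One alternating DARTS update as driven by a surrounding training loop:
# Architecture.step updates alpha on the validation batch, then the usual
# optimizer updates theta on the training batch. `arch` is an Architecture
# built once per run; treating model.loss as returning a single scalar here is
# an assumption consistent with comp_unrolled_model above.
def darts_alternating_step(arch, theta_optimizer,
                           x_train, y_train, x_valid, y_valid,
                           eta=0.01, unrolled=True):
    # architecture (alpha) update on the validation batch
    arch.step(x_train, y_train, x_valid, y_valid, eta, theta_optimizer, unrolled)
    # weight (theta) update on the training batch
    theta_optimizer.zero_grad()
    loss = arch.model.loss(x_train, y_train)
    loss.backward()
    theta_optimizer.step()
    return loss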
mit
anntzer/scikit-learn
sklearn/impute/_base.py
7
38334
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com> # Sergey Feldman <sergeyfeldman@gmail.com> # License: BSD 3 clause import numbers import warnings from collections import Counter import numpy as np import numpy.ma as ma from scipy import sparse as sp from ..base import BaseEstimator, TransformerMixin from ..utils._param_validation import StrOptions, Hidden from ..utils.fixes import _mode from ..utils.sparsefuncs import _get_median from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..utils.validation import _check_feature_names_in from ..utils._mask import _get_mask from ..utils import _is_pandas_na from ..utils import is_scalar_nan def _check_inputs_dtype(X, missing_values): if _is_pandas_na(missing_values): # Allow using `pd.NA` as missing values to impute numerical arrays. return if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real): raise ValueError( "'X' and 'missing_values' types are expected to be" " both numerical. Got X.dtype={} and " " type(missing_values)={}.".format(X.dtype, type(missing_values)) ) def _most_frequent(array, extra_value, n_repeat): """Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array.""" # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. # Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = min( value for value, count in counter.items() if count == most_frequent_count ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 # Compare to array + [extra_value] * n_repeat if most_frequent_count == 0 and n_repeat == 0: return np.nan elif most_frequent_count < n_repeat: return extra_value elif most_frequent_count > n_repeat: return most_frequent_value elif most_frequent_count == n_repeat: # tie breaking similarly to scipy.stats.mode return min(most_frequent_value, extra_value) class _BaseImputer(TransformerMixin, BaseEstimator): """Base class for all imputers. It adds automatically support for `add_indicator`. """ _parameter_constraints: dict = { "missing_values": ["missing_values"], "add_indicator": ["boolean"], } def __init__(self, *, missing_values=np.nan, add_indicator=False): self.missing_values = missing_values self.add_indicator = add_indicator def _fit_indicator(self, X): """Fit a MissingIndicator.""" if self.add_indicator: self.indicator_ = MissingIndicator( missing_values=self.missing_values, error_on_new=False ) self.indicator_._fit(X, precomputed=True) else: self.indicator_ = None def _transform_indicator(self, X): """Compute the indicator mask.' Note that X must be the original data as passed to the imputer before any imputation, since imputation may be done inplace in some cases. """ if self.add_indicator: if not hasattr(self, "indicator_"): raise ValueError( "Make sure to call _fit_indicator before _transform_indicator" ) return self.indicator_.transform(X) def _concatenate_indicator(self, X_imputed, X_indicator): """Concatenate indicator mask with the imputed data.""" if not self.add_indicator: return X_imputed hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack if X_indicator is None: raise ValueError( "Data from the missing indicator are not provided. 
Call " "_fit_indicator and _transform_indicator in the imputer " "implementation." ) return hstack((X_imputed, X_indicator)) def _concatenate_indicator_feature_names_out(self, names, input_features): if not self.add_indicator: return names indicator_names = self.indicator_.get_feature_names_out(input_features) return np.concatenate([names, indicator_names]) def _more_tags(self): return {"allow_nan": is_scalar_nan(self.missing_values)} class SimpleImputer(_BaseImputer): """Univariate imputer for completing missing values with simple strategies. Replace missing values using a descriptive statistic (e.g. mean, median, or most frequent) along each column, or using a constant value. Read more in the :ref:`User Guide <impute>`. .. versionadded:: 0.20 `SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer` estimator which is now removed. Parameters ---------- missing_values : int, float, str, np.nan, None or pandas.NA, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` can be set to either `np.nan` or `pd.NA`. strategy : str, default='mean' The imputation strategy. - If "mean", then replace missing values using the mean along each column. Can only be used with numeric data. - If "median", then replace missing values using the median along each column. Can only be used with numeric data. - If "most_frequent", then replace missing using the most frequent value along each column. Can be used with strings or numeric data. If there is more than one such value, only the smallest is returned. - If "constant", then replace missing values with fill_value. Can be used with strings or numeric data. .. versionadded:: 0.20 strategy="constant" for fixed value imputation. fill_value : str or numerical value, default=None When strategy == "constant", fill_value is used to replace all occurrences of missing_values. If left to the default, fill_value will be 0 when imputing numerical data and "missing_value" for strings or object data types. verbose : int, default=0 Controls the verbosity of the imputer. .. deprecated:: 1.1 The 'verbose' parameter was deprecated in version 1.1 and will be removed in 1.3. A warning will always be raised upon the removal of empty columns in the future version. copy : bool, default=True If True, a copy of X will be created. If False, imputation will be done in-place whenever possible. Note that, in the following cases, a new copy will always be made, even if `copy=False`: - If `X` is not an array of floating values; - If `X` is encoded as a CSR matrix; - If `add_indicator=True`. add_indicator : bool, default=False If True, a :class:`MissingIndicator` transform will stack onto output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. Attributes ---------- statistics_ : array of shape (n_features,) The imputation fill value for each feature. Computing statistics can result in `np.nan` values. During :meth:`transform`, features corresponding to `np.nan` statistics will be discarded. indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. `None` if `add_indicator=False`. n_features_in_ : int Number of features seen during :term:`fit`. .. 
versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- IterativeImputer : Multivariate imputer that estimates values to impute for each feature with missing values from all the others. KNNImputer : Multivariate imputer that estimates missing features using nearest samples. Notes ----- Columns which only contained missing values at :meth:`fit` are discarded upon :meth:`transform` if strategy is not `"constant"`. In a prediction context, simple imputation usually performs poorly when associated with a weak learner. However, with a powerful learner, it can lead to as good or better performance than complex imputation such as :class:`~sklearn.impute.IterativeImputer` or :class:`~sklearn.impute.KNNImputer`. Examples -------- >>> import numpy as np >>> from sklearn.impute import SimpleImputer >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean') >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) SimpleImputer() >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] >>> print(imp_mean.transform(X)) [[ 7. 2. 3. ] [ 4. 3.5 6. ] [10. 3.5 9. ]] """ _parameter_constraints: dict = { **_BaseImputer._parameter_constraints, "strategy": [StrOptions({"mean", "median", "most_frequent", "constant"})], "fill_value": "no_validation", # any object is valid "verbose": ["verbose", Hidden(StrOptions({"deprecated"}))], "copy": ["boolean"], } def __init__( self, *, missing_values=np.nan, strategy="mean", fill_value=None, verbose="deprecated", copy=True, add_indicator=False, ): super().__init__(missing_values=missing_values, add_indicator=add_indicator) self.strategy = strategy self.fill_value = fill_value self.verbose = verbose self.copy = copy def _validate_input(self, X, in_fit): if self.strategy in ("most_frequent", "constant"): # If input is a list of strings, dtype = object. # Otherwise ValueError is raised in SimpleImputer # with strategy='most_frequent' or 'constant' # because the list is converted to Unicode numpy array if isinstance(X, list) and any( isinstance(elem, str) for row in X for elem in row ): dtype = object else: dtype = None else: dtype = FLOAT_DTYPES if not in_fit and self._fit_dtype.kind == "O": # Use object dtype if fitted on object dtypes dtype = self._fit_dtype if _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values): force_all_finite = "allow-nan" else: force_all_finite = True try: X = self._validate_data( X, reset=in_fit, accept_sparse="csc", dtype=dtype, force_all_finite=force_all_finite, copy=self.copy, ) except ValueError as ve: if "could not convert" in str(ve): new_ve = ValueError( "Cannot use {} strategy with non-numeric data:\n{}".format( self.strategy, ve ) ) raise new_ve from None else: raise ve if in_fit: # Use the dtype seen in `fit` for non-`fit` conversion self._fit_dtype = X.dtype _check_inputs_dtype(X, self.missing_values) if X.dtype.kind not in ("i", "u", "f", "O"): raise ValueError( "SimpleImputer does not support data with dtype " "{0}. Please provide either a numeric array (with" " a floating point or integer dtype) or " "categorical data represented either as an array " "with integer dtype or an array of string values " "with an object dtype.".format(X.dtype) ) return X def fit(self, X, y=None): """Fit the imputer on `X`. 
Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted estimator. """ self._validate_params() if self.verbose != "deprecated": warnings.warn( "The 'verbose' parameter was deprecated in version " "1.1 and will be removed in 1.3. A warning will " "always be raised upon the removal of empty columns " "in the future version.", FutureWarning, ) X = self._validate_input(X, in_fit=True) # default fill_value is 0 for numerical input and "missing_value" # otherwise if self.fill_value is None: if X.dtype.kind in ("i", "u", "f"): fill_value = 0 else: fill_value = "missing_value" else: fill_value = self.fill_value # fill_value should be numerical in case of numerical input if ( self.strategy == "constant" and X.dtype.kind in ("i", "u", "f") and not isinstance(fill_value, numbers.Real) ): raise ValueError( "'fill_value'={0} is invalid. Expected a " "numerical value when imputing numerical " "data".format(fill_value) ) if sp.issparse(X): # missing_values = 0 not allowed with sparse data as it would # force densification if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." ) else: self.statistics_ = self._sparse_fit( X, self.strategy, self.missing_values, fill_value ) else: self.statistics_ = self._dense_fit( X, self.strategy, self.missing_values, fill_value ) return self def _sparse_fit(self, X, strategy, missing_values, fill_value): """Fit the transformer on sparse data.""" missing_mask = _get_mask(X, missing_values) mask_data = missing_mask.data n_implicit_zeros = X.shape[0] - np.diff(X.indptr) statistics = np.empty(X.shape[1]) if strategy == "constant": # for constant strategy, self.statistcs_ is used to store # fill_value in each column statistics.fill(fill_value) else: for i in range(X.shape[1]): column = X.data[X.indptr[i] : X.indptr[i + 1]] mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]] column = column[~mask_column] # combine explicit and implicit zeros mask_zeros = _get_mask(column, 0) column = column[~mask_zeros] n_explicit_zeros = mask_zeros.sum() n_zeros = n_implicit_zeros[i] + n_explicit_zeros if strategy == "mean": s = column.size + n_zeros statistics[i] = np.nan if s == 0 else column.sum() / s elif strategy == "median": statistics[i] = _get_median(column, n_zeros) elif strategy == "most_frequent": statistics[i] = _most_frequent(column, 0, n_zeros) super()._fit_indicator(missing_mask) return statistics def _dense_fit(self, X, strategy, missing_values, fill_value): """Fit the transformer on dense data.""" missing_mask = _get_mask(X, missing_values) masked_X = ma.masked_array(X, mask=missing_mask) super()._fit_indicator(missing_mask) # Mean if strategy == "mean": mean_masked = np.ma.mean(masked_X, axis=0) # Avoid the warning "Warning: converting a masked element to nan." mean = np.ma.getdata(mean_masked) mean[np.ma.getmask(mean_masked)] = np.nan return mean # Median elif strategy == "median": median_masked = np.ma.median(masked_X, axis=0) # Avoid the warning "Warning: converting a masked element to nan." 
median = np.ma.getdata(median_masked) median[np.ma.getmaskarray(median_masked)] = np.nan return median # Most frequent elif strategy == "most_frequent": # Avoid use of scipy.stats.mstats.mode due to the required # additional overhead and slow benchmarking performance. # See Issue 14325 and PR 14399 for full discussion. # To be able access the elements by columns X = X.transpose() mask = missing_mask.transpose() if X.dtype.kind == "O": most_frequent = np.empty(X.shape[0], dtype=object) else: most_frequent = np.empty(X.shape[0]) for i, (row, row_mask) in enumerate(zip(X[:], mask[:])): row_mask = np.logical_not(row_mask).astype(bool) row = row[row_mask] most_frequent[i] = _most_frequent(row, np.nan, 0) return most_frequent # Constant elif strategy == "constant": # for constant strategy, self.statistcs_ is used to store # fill_value in each column return np.full(X.shape[1], fill_value, dtype=X.dtype) def transform(self, X): """Impute all missing values in `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data to complete. Returns ------- X_imputed : {ndarray, sparse matrix} of shape \ (n_samples, n_features_out) `X` with imputed values. """ check_is_fitted(self) X = self._validate_input(X, in_fit=False) statistics = self.statistics_ if X.shape[1] != statistics.shape[0]: raise ValueError( "X has %d features per sample, expected %d" % (X.shape[1], self.statistics_.shape[0]) ) # compute mask before eliminating invalid features missing_mask = _get_mask(X, self.missing_values) # Delete the invalid columns if strategy is not constant if self.strategy == "constant": valid_statistics = statistics valid_statistics_indexes = None else: # same as np.isnan but also works for object dtypes invalid_mask = _get_mask(statistics, np.nan) valid_mask = np.logical_not(invalid_mask) valid_statistics = statistics[valid_mask] valid_statistics_indexes = np.flatnonzero(valid_mask) if invalid_mask.any(): invalid_features = np.arange(X.shape[1])[invalid_mask] if self.verbose != "deprecated" and self.verbose: # use feature names warning if features are provided if hasattr(self, "feature_names_in_"): invalid_features = self.feature_names_in_[invalid_features] warnings.warn( "Skipping features without any observed values:" f" {invalid_features}. At least one non-missing value is needed" f" for imputation with strategy='{self.strategy}'." ) X = X[:, valid_statistics_indexes] # Do actual imputation if sp.issparse(X): if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." 
) else: # if no invalid statistics are found, use the mask computed # before, else recompute mask if valid_statistics_indexes is None: mask = missing_mask.data else: mask = _get_mask(X.data, self.missing_values) indexes = np.repeat( np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr) )[mask] X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False) else: # use mask computed before eliminating invalid mask if valid_statistics_indexes is None: mask_valid_features = missing_mask else: mask_valid_features = missing_mask[:, valid_statistics_indexes] n_missing = np.sum(mask_valid_features, axis=0) values = np.repeat(valid_statistics, n_missing) coordinates = np.where(mask_valid_features.transpose())[::-1] X[coordinates] = values X_indicator = super()._transform_indicator(missing_mask) return super()._concatenate_indicator(X, X_indicator) def inverse_transform(self, X): """Convert the data back to the original representation. Inverts the `transform` operation performed on an array. This operation can only be performed after :class:`SimpleImputer` is instantiated with `add_indicator=True`. Note that `inverse_transform` can only invert the transform in features that have binary indicators for missing values. If a feature has no missing values at `fit` time, the feature won't have a binary indicator, and the imputation done at `transform` time won't be inverted. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape \ (n_samples, n_features + n_features_missing_indicator) The imputed data to be reverted to original data. It has to be an augmented array of imputed data and the missing indicator mask. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original `X` with missing values as it was prior to imputation. """ check_is_fitted(self) if not self.add_indicator: raise ValueError( "'inverse_transform' works only when " "'SimpleImputer' is instantiated with " "'add_indicator=True'. " f"Got 'add_indicator={self.add_indicator}' " "instead." ) n_features_missing = len(self.indicator_.features_) non_empty_feature_count = X.shape[1] - n_features_missing array_imputed = X[:, :non_empty_feature_count].copy() missing_mask = X[:, non_empty_feature_count:].astype(bool) n_features_original = len(self.statistics_) shape_original = (X.shape[0], n_features_original) X_original = np.zeros(shape_original) X_original[:, self.indicator_.features_] = missing_mask full_mask = X_original.astype(bool) imputed_idx, original_idx = 0, 0 while imputed_idx < len(array_imputed.T): if not np.all(X_original[:, original_idx]): X_original[:, original_idx] = array_imputed.T[imputed_idx] imputed_idx += 1 original_idx += 1 else: original_idx += 1 X_original[full_mask] = self.missing_values return X_original def _more_tags(self): return { "allow_nan": ( _is_pandas_na(self.missing_values) or is_scalar_nan(self.missing_values) ) } def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. 
""" input_features = _check_feature_names_in(self, input_features) non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan)) names = input_features[non_missing_mask] return self._concatenate_indicator_feature_names_out(names, input_features) class MissingIndicator(TransformerMixin, BaseEstimator): """Binary indicators for missing values. Note that this component typically should not be used in a vanilla :class:`Pipeline` consisting of transformers and a classifier, but rather could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`. Read more in the :ref:`User Guide <impute>`. .. versionadded:: 0.20 Parameters ---------- missing_values : int, float, str, np.nan or None, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. features : {'missing-only', 'all'}, default='missing-only' Whether the imputer mask should represent all or a subset of features. - If `'missing-only'` (default), the imputer mask will only represent features containing missing values during fit time. - If `'all'`, the imputer mask will represent all features. sparse : bool or 'auto', default='auto' Whether the imputer mask format should be sparse or dense. - If `'auto'` (default), the imputer mask will be of same type as input. - If `True`, the imputer mask will be a sparse matrix. - If `False`, the imputer mask will be a numpy array. error_on_new : bool, default=True If `True`, :meth:`transform` will raise an error when there are features with missing values that have no missing values in :meth:`fit`. This is applicable only when `features='missing-only'`. Attributes ---------- features_ : ndarray of shape (n_missing_features,) or (n_features,) The features indices which will be returned when calling :meth:`transform`. They are computed during :meth:`fit`. If `features='all'`, `features_` is equal to `range(n_features)`. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SimpleImputer : Univariate imputation of missing values. IterativeImputer : Multivariate imputation of missing values. Examples -------- >>> import numpy as np >>> from sklearn.impute import MissingIndicator >>> X1 = np.array([[np.nan, 1, 3], ... [4, 0, np.nan], ... [8, 1, 0]]) >>> X2 = np.array([[5, 1, np.nan], ... [np.nan, 2, 3], ... [2, 4, 0]]) >>> indicator = MissingIndicator() >>> indicator.fit(X1) MissingIndicator() >>> X2_tr = indicator.transform(X2) >>> X2_tr array([[False, True], [ True, False], [False, False]]) """ _parameter_constraints: dict = { "missing_values": [numbers.Real, numbers.Integral, str, None], "features": [StrOptions({"missing-only", "all"})], "sparse": ["boolean", StrOptions({"auto"})], "error_on_new": ["boolean"], } def __init__( self, *, missing_values=np.nan, features="missing-only", sparse="auto", error_on_new=True, ): self.missing_values = missing_values self.features = features self.sparse = sparse self.error_on_new = error_on_new def _get_missing_features_info(self, X): """Compute the imputer mask and the indices of the features containing missing values. 
Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data with missing values. Note that `X` has been checked in :meth:`fit` and :meth:`transform` before to call this function. Returns ------- imputer_mask : {ndarray, sparse matrix} of shape \ (n_samples, n_features) The imputer mask of the original data. features_with_missing : ndarray of shape (n_features_with_missing) The features containing missing values. """ if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if sp.issparse(X): imputer_mask.eliminate_zeros() if self.features == "missing-only": n_missing = imputer_mask.getnnz(axis=0) if self.sparse is False: imputer_mask = imputer_mask.toarray() elif imputer_mask.format == "csr": imputer_mask = imputer_mask.tocsc() else: if not self._precomputed: imputer_mask = _get_mask(X, self.missing_values) else: imputer_mask = X if self.features == "missing-only": n_missing = imputer_mask.sum(axis=0) if self.sparse is True: imputer_mask = sp.csc_matrix(imputer_mask) if self.features == "all": features_indices = np.arange(X.shape[1]) else: features_indices = np.flatnonzero(n_missing) return imputer_mask, features_indices def _validate_input(self, X, in_fit): if not is_scalar_nan(self.missing_values): force_all_finite = True else: force_all_finite = "allow-nan" X = self._validate_data( X, reset=in_fit, accept_sparse=("csc", "csr"), dtype=None, force_all_finite=force_all_finite, ) _check_inputs_dtype(X, self.missing_values) if X.dtype.kind not in ("i", "u", "f", "O"): raise ValueError( "MissingIndicator does not support data with " "dtype {0}. Please provide either a numeric array" " (with a floating point or integer dtype) or " "categorical data represented either as an array " "with integer dtype or an array of string values " "with an object dtype.".format(X.dtype) ) if sp.issparse(X) and self.missing_values == 0: # missing_values = 0 not allowed with sparse data as it would # force densification raise ValueError( "Sparse input with missing_values=0 is " "not supported. Provide a dense " "array instead." ) return X def _fit(self, X, y=None, precomputed=False): """Fit the transformer on `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. If `precomputed=True`, then `X` is a mask of the input data. precomputed : bool Whether the input data is a mask. Returns ------- imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \ n_features) The imputer mask of the original data. """ if precomputed: if not (hasattr(X, "dtype") and X.dtype.kind == "b"): raise ValueError("precomputed is True but the input data is not a mask") self._precomputed = True else: self._precomputed = False # Need not validate X again as it would have already been validated # in the Imputer calling MissingIndicator if not self._precomputed: X = self._validate_input(X, in_fit=True) self._n_features = X.shape[1] missing_features_info = self._get_missing_features_info(X) self.features_ = missing_features_info[1] return missing_features_info[0] def fit(self, X, y=None): """Fit the transformer on `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Fitted estimator. 
""" self._validate_params() self._fit(X, y) return self def transform(self, X): """Generate missing values indicator for `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of `Xt` will be boolean. """ check_is_fitted(self) # Need not validate X again as it would have already been validated # in the Imputer calling MissingIndicator if not self._precomputed: X = self._validate_input(X, in_fit=False) else: if not (hasattr(X, "dtype") and X.dtype.kind == "b"): raise ValueError("precomputed is True but the input data is not a mask") imputer_mask, features = self._get_missing_features_info(X) if self.features == "missing-only": features_diff_fit_trans = np.setdiff1d(features, self.features_) if self.error_on_new and features_diff_fit_trans.size > 0: raise ValueError( "The features {} have missing values " "in transform but have no missing values " "in fit.".format(features_diff_fit_trans) ) if self.features_.size < self._n_features: imputer_mask = imputer_mask[:, self.features_] return imputer_mask def fit_transform(self, X, y=None): """Generate missing values indicator for `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. y : Ignored Not used, present for API consistency by convention. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \ or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of `Xt` will be boolean. """ self._validate_params() imputer_mask = self._fit(X, y) if self.features_.size < self._n_features: imputer_mask = imputer_mask[:, self.features_] return imputer_mask def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ input_features = _check_feature_names_in(self, input_features) prefix = self.__class__.__name__.lower() return np.asarray( [ f"{prefix}_{feature_name}" for feature_name in input_features[self.features_] ], dtype=object, ) def _more_tags(self): return { "allow_nan": True, "X_types": ["2darray", "string"], "preserves_dtype": [], }
bsd-3-clause
pytorch/fairseq
examples/MMPT/mmpt/tasks/retritask.py
1
8413
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import torch import pickle import random from tqdm import tqdm from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from ..processors import ( ShardedHow2MetaProcessor, ShardedVideoProcessor, ShardedTextProcessor, VariedLenAligner, ) from ..datasets import MMDataset from .task import Task from ..modules import vectorpool from ..evaluators.predictor import Predictor from ..utils import set_seed, get_local_rank, get_world_size class RetriTask(Task): """abstract class for task with retrival.""" def reshape_subsample(self, sample): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if tensor.size(0) == 1: tensor = tensor.squeeze(0) return tensor def build_dataloader(self): """called by `get_batch_iterator` in fairseqmmtask. """ # TODO: hard-code dataloader for retri for now and configurable in .yaml. # reuse the `train.lst`. self.config.dataset.split = "train" meta_processor = ShardedHow2MetaProcessor(self.config.dataset) video_processor = ShardedVideoProcessor(self.config.dataset) text_processor = ShardedTextProcessor(self.config.dataset) aligner = VariedLenAligner(self.config.dataset) aligner.subsampling = self.config.dataset.clip_per_video self.retri_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) retri_sampler = DistributedSampler(self.retri_data) infer_scale = 16 batch_size = self.config.dataset.num_video_per_batch \ * infer_scale self.retri_dataloader = DataLoader( self.retri_data, collate_fn=self.retri_data.collater, batch_size=batch_size, shuffle=False, sampler=retri_sampler, num_workers=self.config.fairseq.dataset.num_workers ) return self.retri_dataloader def retrive_candidates(self, epoch, dataloader=None): if get_local_rank() == 0: print("running retrieval model.") out_dir = os.path.join( self.config.fairseq.checkpoint.save_dir, "retri") os.makedirs(out_dir, exist_ok=True) if not os.path.isfile( os.path.join( out_dir, "batched_e" + str(epoch) + "_videos0.pkl") ): if dataloader is None: dataloader = self.retri_dataloader self.model.eval() self.model.is_train = False assert self.retri_data.meta_processor.data == \ self.train_data.meta_processor.data # video_ids not mutated. self._retri_predict(epoch, dataloader) self.model.train() self.model.is_train = True torch.distributed.barrier() output = self._retri_sync(epoch, out_dir) torch.distributed.barrier() self.train_data.meta_processor.set_candidates(output) return output class VideoRetriTask(RetriTask): """RetriTask on video level.""" def reshape_subsample(self, sample): if ( hasattr(self.config.dataset, "clip_per_video") and self.config.dataset.clip_per_video is not None and self.config.dataset.clip_per_video > 1 ): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if tensor.size(0) == 1: tensor = tensor.squeeze(0) return Task.flat_subsample(self, tensor) def _retri_predict(self, epoch, dataloader): set_seed(epoch) # save for retrival. predictor = VideoPredictor(self.config) predictor.predict_loop( self.model, dataloader) set_seed(epoch) # get the same text clips. # retrival. 
retri_predictor = VideoRetriPredictor( self.config) retri_predictor.predict_loop( self.model, predictor.vecpool.retriver, epoch) del predictor del retri_predictor def _retri_sync(self, epoch, out_dir): # gpu do the same merge. batched_videos = [] for local_rank in range(get_world_size()): fn = os.path.join( out_dir, "batched_e" + str(epoch) + "_videos" + str(local_rank) + ".pkl") with open(fn, "rb") as fr: batched_videos.extend(pickle.load(fr)) print( "[INFO] batched_videos", len(batched_videos), len(batched_videos[0])) return batched_videos class VideoPredictor(Predictor): def __init__(self, config): vectorpool_cls = getattr(vectorpool, config.vectorpool_cls) self.vecpool = vectorpool_cls(config) def predict_loop( self, model, dataloader, early_stop=-1, ): with torch.no_grad(): if get_local_rank() == 0: dataloader = tqdm(dataloader) for batch_idx, batch in enumerate(dataloader): if batch_idx == early_stop: break self(batch, model) return self.finalize() def __call__(self, sample, model, **kwargs): param = next(model.parameters()) dtype = param.dtype device = param.device subsample = sample["vfeats"].size(1) sample = self.to_ctx(sample, device, dtype) for key in sample: if torch.is_tensor(sample[key]): size = sample[key].size() if len(size) >= 2: batch_size = size[0] * size[1] expanded_size = ( (batch_size,) + size[2:] if len(size) > 2 else (batch_size,) ) sample[key] = sample[key].view(expanded_size) outputs = model(**sample) sample.update(outputs) self.vecpool(sample, subsample) def finalize(self): print("[INFO]", self.vecpool) if not self.vecpool.retriver.db.is_trained: self.vecpool.retriver.finalize_training() return self.vecpool.retriver class VideoRetriPredictor(Predictor): """ Online Retrieval Predictor for Clips (used by RetriTask). TODO: merge this with VisPredictor? """ def __init__(self, config): self.pred_dir = os.path.join( config.fairseq.checkpoint.save_dir, "retri") self.num_cands = config.num_cands self.num_video_per_batch = config.dataset.num_video_per_batch def predict_loop( self, model, retriver, epoch, early_stop=-1 ): # a fake loop that only try to recover video vector # from video_id. batched_videos = [] # obtain available video_ids. video_ids = list(retriver.videoid_to_vectoridx.keys()) dataloader = random.sample( video_ids, len(video_ids) // self.num_video_per_batch ) if get_local_rank() == 0: dataloader = tqdm(dataloader) for batch_idx, batch in enumerate(dataloader): # batch is one video id. if batch_idx == early_stop: break video_ids = retriver.search_by_video_ids( [batch], self.num_cands)[0] if len(video_ids) > self.num_video_per_batch: # we moved the center to make cluster robust. video_ids = random.sample(video_ids, self.num_video_per_batch) batched_videos.append(video_ids) return self.finalize(batched_videos, epoch) def finalize(self, batched_videos, epoch): fn = os.path.join( self.pred_dir, "batched_e" + str(epoch) + "_videos" + str(get_local_rank()) + ".pkl") with open(fn, "wb") as fw: pickle.dump(batched_videos, fw, pickle.HIGHEST_PROTOCOL) return batched_videos
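# --- Usage sketch (added for illustration; not part of the original task code) ---
# Sketch of how the retrieval refresh above is typically driven once per epoch
# by the surrounding fairseq training loop. `task` is assumed to be a configured
# VideoRetriTask running under torch.distributed (retrive_candidates calls
# torch.distributed.barrier()).
def refresh_retrieval_candidates(task, epoch):
    if not hasattr(task, "retri_dataloader"):
        task.build_dataloader()
    return task.retrive_candidates(epoch)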
mit
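The RetriTask/VideoRetriPredictor pair in the record above builds each training batch from a video's nearest neighbours in a FAISS-backed vector pool and then randomly subsamples the candidates down to the configured batch size. Below is a minimal, self-contained sketch of that sampling step only, using a plain dict of vectors and brute-force cosine similarity instead of mmpt's retriever; build_retrieval_batches and all of its argument names are hypothetical, not part of the repo.

import random
import numpy as np

def build_retrieval_batches(video_vecs, num_cands=8, num_video_per_batch=4, seed=0):
    # video_vecs: dict mapping video_id -> 1-D L2-normalised numpy vector
    rng = random.Random(seed)
    ids = list(video_vecs)
    mat = np.stack([video_vecs[v] for v in ids])            # [N, D]
    batches = []
    # one randomly chosen "center" video per batch, mirroring the fake loop in predict_loop()
    for center in rng.sample(ids, len(ids) // num_video_per_batch):
        sims = mat @ video_vecs[center]                      # cosine similarity to the center
        cands = [ids[i] for i in np.argsort(-sims)[:num_cands]]
        if len(cands) > num_video_per_batch:
            # subsample around the center, as the original does to keep clusters robust
            cands = rng.sample(cands, num_video_per_batch)
        batches.append(cands)
    return batches

# usage sketch:
# vecs = {f"vid{i}": v / np.linalg.norm(v) for i, v in enumerate(np.random.randn(32, 16))}
# print(build_retrieval_batches(vecs))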
ECP-CANDLE/Benchmarks
common/darts/modules/conv/network.py
1
8972
import torch import torch.nn as nn import torch.nn.functional as F from darts.api import Model from darts.genotypes import PRIMITIVES, Genotype from darts.modules.classifier import MultitaskClassifier from darts.modules.conv import Cell class Hyperparameters: c = 8 num_nodes = 2 num_cells = 3 channel_multiplier = 2 stem_channel_multiplier = 2 num_embeddings = 35095 # vocab size embedding_dim = 1500 class ConvNetwork(Model): """Collection of cells""" def __init__(self, tasks, criterion, device="cpu", hyperparams=Hyperparameters()): super(ConvNetwork, self).__init__() self.tasks = tasks self.criterion = criterion self.device = device self.c = hyperparams.c self.num_cells = hyperparams.num_cells self.num_nodes = hyperparams.num_nodes self.channel_multiplier = hyperparams.channel_multiplier # stem_multiplier is for stem network, # and multiplier is for general cell c_curr = hyperparams.stem_channel_multiplier * self.c # 3*16 # stem network, convert 3 channel to c_curr self.stem = nn.Sequential( nn.Embedding( num_embeddings=hyperparams.num_embeddings, embedding_dim=hyperparams.embedding_dim, ), nn.Conv1d(hyperparams.embedding_dim, c_curr, 3, padding=1, bias=False), nn.BatchNorm1d(c_curr), ).to(self.device) # c_curr means a factor of the output channels of current cell # output channels = multiplier * c_curr cpp, cp, c_curr = c_curr, c_curr, self.c self.cells = nn.ModuleList() reduction_prev = False for i in range(hyperparams.num_cells): # for layer in the middle [1/3, 2/3], reduce via stride=2 if i in [hyperparams.num_cells // 3, 2 * hyperparams.num_cells // 3]: c_curr *= 2 reduction = True else: reduction = False # [cp, h, h] => [multiplier*c_curr, h/h//2, h/h//2] # the output channels = multiplier * c_curr cell = Cell( hyperparams.num_nodes, hyperparams.channel_multiplier, cpp, cp, c_curr, reduction, reduction_prev, ).to(self.device) # update reduction_prev reduction_prev = reduction self.cells += [cell] cpp, cp = cp, hyperparams.channel_multiplier * c_curr # adaptive pooling output size to 1x1 self.global_pooling = nn.AdaptiveAvgPool1d(1) # since cp records last cell's output channels # it indicates the input channel number self.classifier = MultitaskClassifier(cp, tasks) # k is the total number of edges inside single cell, 14 k = sum(1 for i in range(self.num_nodes) for j in range(2 + i)) num_ops = len(PRIMITIVES) # 8 self.alpha_normal = nn.Parameter(torch.randn(k, num_ops)) self.alpha_reduce = nn.Parameter(torch.randn(k, num_ops)) with torch.no_grad(): # initialize to smaller value self.alpha_normal.mul_(1e-3) self.alpha_reduce.mul_(1e-3) self._arch_parameters = [ self.alpha_normal, self.alpha_reduce, ] def new(self): """Create a new model initialzed with current alpha parameters. Weights are left untouched. Returns ------- model : Network New model initialized with current alpha. 
""" model = ConvNetwork(self.tasks, self.criterion).to(self.device) for x, y in zip(model.arch_parameters(), self.arch_parameters()): x.data.copy_(y.data) return model def forward(self, x): """ in: torch.Size([3, 3, 32, 32]) stem: torch.Size([3, 48, 32, 32]) cell: 0 torch.Size([3, 64, 32, 32]) False cell: 1 torch.Size([3, 64, 32, 32]) False cell: 2 torch.Size([3, 128, 16, 16]) True cell: 3 torch.Size([3, 128, 16, 16]) False cell: 4 torch.Size([3, 128, 16, 16]) False cell: 5 torch.Size([3, 256, 8, 8]) True cell: 6 torch.Size([3, 256, 8, 8]) False cell: 7 torch.Size([3, 256, 8, 8]) False pool: torch.Size([16, 256, 1, 1]) linear: [b, 10] :param x: :return: """ # print('network in:', x.shape) # s0 & s1 means the last cells' output s0 = s1 = self.stem(x) # [b, 3, 32, 32] => [b, 48, 32, 32] # print('network stem:', s0.shape) # print('network stem1:', s1.shape) for i, cell in enumerate(self.cells): # weights are shared across all reduction cell or normal cell # according to current cell's type, it choose which architecture parameters # to use if cell.reduction: # if current cell is reduction cell weights = F.softmax(self.alpha_reduce, dim=-1) else: weights = F.softmax(self.alpha_normal, dim=-1) # [14, 8] # execute cell() firstly and then assign s0=s1, s1=result s0, s1 = s1, cell(s0, s1, weights) # [40, 64, 32, 32] # print('cell:',i, s1.shape, cell.reduction, cell.reduction_prev) # print('\n') # s1 is the last cell's output out = self.global_pooling(s1) # logits = {} # for task, fc in self.classifier.items(): # logits[task] = fc(out.view(out.size(0), -1)) logits = self.classifier(out.view(out.size(0), -1)) return logits def loss(self, data, target, reduce="mean"): """Calculate a value of loss function""" logits = self(data) for task, logit in logits.items(): logits[task] = logit.to(self.device) losses = {} for task, label in target.items(): label = label.to(self.device) losses[task] = self.criterion(logits[task], label) if reduce: total = 0 for _, value in losses.items(): total += value if reduce == "mean": losses = total / len(losses) elif reduce == "sum": losses = total else: raise ValueError("Reduced loss must use either `mean` or `sum`!") return losses def arch_parameters(self): return self._arch_parameters def genotype(self): """ :return: """ def _parse(weights): """ :param weights: [14, 8] :return: """ gene = [] n = 2 start = 0 for i in range(self.num_nodes): # for each node end = start + n W = weights[start:end].copy() # [2, 8], [3, 8], ... 
edges = sorted( range(i + 2), # i+2 is the number of connection for node i key=lambda x: -max( W[x][k] # by descending order for k in range(len(W[x])) # get strongest ops if k != PRIMITIVES.index("none") ), )[ :2 ] # only has two inputs for j in edges: # for every input nodes j of current node i k_best = None for k in range( len(W[j]) ): # get strongest ops for current input j->i if k != PRIMITIVES.index("none"): if k_best is None or W[j][k] > W[j][k_best]: k_best = k gene.append((PRIMITIVES[k_best], j)) # save ops and input node start = end n += 1 return gene gene_normal = _parse(F.softmax(self.alpha_normal, dim=-1).data.cpu().numpy()) gene_reduce = _parse(F.softmax(self.alpha_reduce, dim=-1).data.cpu().numpy()) concat = range(2 + self.num_nodes - self.channel_multiplier, self.num_nodes + 2) genotype = Genotype( normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat, ) return genotype def new( c, num_classes, num_layers, criterion, device, steps=4, multiplier=4, stem_multiplier=3, ): """ create a new model and initialize it with current alpha parameters. However, its weights are left untouched. :return: """ model = ConvNetwork( c, num_classes, num_layers, criterion, steps, multiplier, stem_multiplier ).to(device) for x, y in zip(model.arch_parameters(), model.arch_parameters()): x.data.copy_(y.data) return model
mit
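The genotype() method in the record above reduces the softmaxed architecture weights to a discrete cell description: for every intermediate node it keeps the two strongest incoming edges, and on each kept edge the strongest non-"none" operation. The following self-contained sketch reproduces that parsing rule on a toy alpha matrix, assuming a hypothetical four-op primitive set (the real PRIMITIVES list in darts.genotypes has eight entries, including "none").

import numpy as np

PRIMITIVES = ["none", "skip_connect", "sep_conv_3", "max_pool_3"]  # assumed subset

def parse(alpha, num_nodes=2):
    # alpha: [k, num_ops] softmaxed weights, k = 2 + 3 + ... + (num_nodes + 1)
    gene, start, n = [], 0, 2
    none_idx = PRIMITIVES.index("none")
    for _ in range(num_nodes):
        W = alpha[start:start + n]                             # one row per incoming edge
        strength = [max(W[j][k] for k in range(len(PRIMITIVES)) if k != none_idx)
                    for j in range(n)]
        edges = sorted(range(n), key=lambda j: -strength[j])[:2]   # keep the two best edges
        for j in edges:
            k_best = max((k for k in range(len(PRIMITIVES)) if k != none_idx),
                         key=lambda k: W[j][k])                # strongest real op on this edge
            gene.append((PRIMITIVES[k_best], j))
        start, n = start + n, n + 1
    return gene

# usage sketch:
# logits = np.random.randn(2 + 3, len(PRIMITIVES))             # k = 5 when num_nodes=2
# alpha = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)
# print(parse(alpha))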
pytorch/fairseq
fairseq/criterions/composite_loss.py
1
3793
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, register_criterion from torch import nn @register_criterion("composite_loss") class CompositeLoss(LegacyFairseqCriterion): """This is a composite loss that, given a list of model outputs and a list of targets, computes an average of losses for each output-target pair""" def __init__(self, args, task): super().__init__(args, task) self.underlying_criterion = args.underlying_criterion @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss') # fmt: on @staticmethod def build_underlying_criterion(args, task): saved_criterion = args.criterion args.criterion = args.underlying_criterion assert saved_criterion != args.underlying_criterion underlying_criterion = task.build_criterion(args) args.criterion = saved_criterion return underlying_criterion @classmethod def build_criterion(cls, args, task): underlying_criterion = CompositeLoss.build_underlying_criterion(args, task) class FakeModel(nn.Module): def __init__(self, model, net_out, target): super().__init__() self.model = model self.net_out = net_out self.target = target def forward(self, **unused): return self.net_out def get_normalized_probs(self, net_output, log_probs, sample=None): return self.model.get_normalized_probs( net_output, log_probs, sample=sample ) def get_targets(self, *unused): return self.target @property def decoder(self): return self.model.decoder class _CompositeLoss(LegacyFairseqCriterion): def __init__(self, args, task, underlying_criterion): super().__init__(args, task) self.underlying_criterion = underlying_criterion def forward(self, model, sample, reduce=True): net_outputs = model(**sample["net_input"]) targets = sample["target"] bsz = targets[0].size(0) loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_() sample_size = 0 logging_output = {} for o, t in zip(net_outputs[0], targets): m = FakeModel(model, (o, net_outputs[1]), t) sample["target"] = t l, ss, logging_output = self.underlying_criterion(m, sample, reduce) loss += l sample_size += ss loss.div_(len(targets)) sample_size /= len(targets) logging_output["loss"] = utils.item(loss.data) if reduce else loss.data return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): return underlying_criterion.__class__.aggregate_logging_outputs( logging_outputs ) @staticmethod def reduce_metrics(logging_outputs) -> None: underlying_criterion.__class__.reduce_metrics(logging_outputs) return _CompositeLoss(args, task, underlying_criterion)
mit
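CompositeLoss in the record above wraps an existing criterion in a FakeModel so that the same underlying loss can be applied to each (output, target) pair and the results averaged. The wrapping itself is fairseq-specific, but the core averaging idea reduces to a few lines of plain PyTorch; the sketch below uses cross_entropy as a stand-in for the underlying criterion, and composite_loss is a hypothetical name, not part of the library.

import torch
import torch.nn.functional as F

def composite_loss(net_outputs, targets, underlying=F.cross_entropy):
    # net_outputs: list of [bsz, num_classes] logits; targets: list of [bsz] label tensors
    assert len(net_outputs) == len(targets)
    total = net_outputs[0].new_zeros(())
    for logits, target in zip(net_outputs, targets):
        total = total + underlying(logits, target)   # same criterion for every pair
    return total / len(targets)                      # averaged, as in the original loss.div_()

# usage sketch:
# outs = [torch.randn(4, 10, requires_grad=True) for _ in range(3)]
# tgts = [torch.randint(0, 10, (4,)) for _ in range(3)]
# composite_loss(outs, tgts).backward()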
anntzer/scikit-learn
sklearn/linear_model/_stochastic_gradient.py
4
87221
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification, regression and One-Class SVM using Stochastic Gradient Descent (SGD). """ import numpy as np import warnings from abc import ABCMeta, abstractmethod from numbers import Integral, Real from joblib import Parallel from ..base import clone, is_classifier from ._base import LinearClassifierMixin, SparseCoefMixin from ._base import make_dataset from ..base import BaseEstimator, RegressorMixin, OutlierMixin from ..utils import check_random_state from ..utils.metaestimators import available_if from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted, _check_sample_weight from ..utils._param_validation import Interval from ..utils._param_validation import StrOptions from ..utils._param_validation import Hidden from ..utils.fixes import delayed from ..exceptions import ConvergenceWarning from ..model_selection import StratifiedShuffleSplit, ShuffleSplit from ._sgd_fast import _plain_sgd from ..utils import compute_class_weight from ._sgd_fast import Hinge from ._sgd_fast import SquaredHinge from ._sgd_fast import Log from ._sgd_fast import ModifiedHuber from ._sgd_fast import SquaredLoss from ._sgd_fast import Huber from ._sgd_fast import EpsilonInsensitive from ._sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = { "constant": 1, "optimal": 2, "invscaling": 3, "adaptive": 4, "pa1": 5, "pa2": 6, } PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} DEFAULT_EPSILON = 0.1 # Default value of ``epsilon`` parameter. MAX_INT = np.iinfo(np.int32).max class _ValidationScoreCallback: """Callback for early stopping based on validation score""" def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None): self.estimator = clone(estimator) self.estimator.t_ = 1 # to pass check_is_fitted if classes is not None: self.estimator.classes_ = classes self.X_val = X_val self.y_val = y_val self.sample_weight_val = sample_weight_val def __call__(self, coef, intercept): est = self.estimator est.coef_ = coef.reshape(1, -1) est.intercept_ = np.atleast_1d(intercept) return est.score(self.X_val, self.y_val, self.sample_weight_val) class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta): """Base class for SGD classification and regression.""" _parameter_constraints: dict = { "fit_intercept": ["boolean"], "max_iter": [Interval(Integral, 1, None, closed="left")], "tol": [Interval(Real, 0, None, closed="left"), None], "shuffle": ["boolean"], "verbose": ["verbose"], "random_state": ["random_state"], "warm_start": ["boolean"], "average": [Interval(Integral, 0, None, closed="left"), bool, np.bool_], } def __init__( self, loss, *, penalty="l2", alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, ): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.early_stopping = early_stopping self.validation_fraction = validation_fraction self.n_iter_no_change = 
n_iter_no_change self.warm_start = warm_start self.average = average self.max_iter = max_iter self.tol = tol @abstractmethod def fit(self, X, y): """Fit model.""" def _more_validate_params(self, for_partial_fit=False): """Validate input params.""" if self.early_stopping and for_partial_fit: raise ValueError("early_stopping should be False with partial_fit") if ( self.learning_rate in ("constant", "invscaling", "adaptive") and self.eta0 <= 0.0 ): raise ValueError("eta0 must be > 0") if self.learning_rate == "optimal" and self.alpha == 0: raise ValueError( "alpha must be > 0 since " "learning_rate is 'optimal'. alpha is used " "to compute the optimal learning rate." ) # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) # TODO(1.3): remove "log" if self.loss == "log": warnings.warn( "The loss 'log' was deprecated in v1.1 and will be removed in version " "1.3. Use `loss='log_loss'` which is equivalent.", FutureWarning, ) def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``.""" loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"): args = (self.epsilon,) return loss_class(*args) def _get_learning_rate_type(self, learning_rate): return LEARNING_RATE_TYPES[learning_rate] def _get_penalty_type(self, penalty): penalty = str(penalty).lower() return PENALTY_TYPES[penalty] def _allocate_parameter_mem( self, n_classes, n_features, coef_init=None, intercept_init=None, one_class=0 ): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match dataset. 
") self.coef_ = coef_init else: self.coef_ = np.zeros( (n_classes, n_features), dtype=np.float64, order="C" ) # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes,): raise ValueError("Provided intercept_init does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init does not match dataset.") if one_class: self.offset_ = intercept_init.reshape( 1, ) else: self.intercept_ = intercept_init.reshape( 1, ) else: if one_class: self.offset_ = np.zeros(1, dtype=np.float64, order="C") else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self._standard_coef = self.coef_ self._average_coef = np.zeros(self.coef_.shape, dtype=np.float64, order="C") if one_class: self._standard_intercept = 1 - self.offset_ else: self._standard_intercept = self.intercept_ self._average_intercept = np.zeros( self._standard_intercept.shape, dtype=np.float64, order="C" ) def _make_validation_split(self, y): """Split the dataset between training set and validation set. Parameters ---------- y : ndarray of shape (n_samples, ) Target values. Returns ------- validation_mask : ndarray of shape (n_samples, ) Equal to True on the validation set, False on the training set. """ n_samples = y.shape[0] validation_mask = np.zeros(n_samples, dtype=np.bool_) if not self.early_stopping: # use the full set for training, with an empty validation set return validation_mask if is_classifier(self): splitter_type = StratifiedShuffleSplit else: splitter_type = ShuffleSplit cv = splitter_type( test_size=self.validation_fraction, random_state=self.random_state ) idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y)) if idx_train.shape[0] == 0 or idx_val.shape[0] == 0: raise ValueError( "Splitting %d samples into a train set and a validation set " "with validation_fraction=%r led to an empty set (%d and %d " "samples). Please either change validation_fraction, increase " "number of samples, or disable early_stopping." % ( n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0], ) ) validation_mask[idx_val] = True return validation_mask def _make_validation_score_cb( self, validation_mask, X, y, sample_weight, classes=None ): if not self.early_stopping: return None return _ValidationScoreCallback( self, X[validation_mask], y[validation_mask], sample_weight[validation_mask], classes=classes, ) def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. Returns y, coef, intercept, average_coef, average_intercept. 
""" y_i = np.ones(y.shape, dtype=np.float64, order="C") y_i[y != est.classes_[i]] = -1.0 average_intercept = 0 average_coef = None if len(est.classes_) == 2: if not est.average: coef = est.coef_.ravel() intercept = est.intercept_[0] else: coef = est._standard_coef.ravel() intercept = est._standard_intercept[0] average_coef = est._average_coef.ravel() average_intercept = est._average_intercept[0] else: if not est.average: coef = est.coef_[i] intercept = est.intercept_[i] else: coef = est._standard_coef[i] intercept = est._standard_intercept[i] average_coef = est._average_coef[i] average_intercept = est._average_intercept[i] return y_i, coef, intercept, average_coef, average_intercept def fit_binary( est, i, X, y, alpha, C, learning_rate, max_iter, pos_weight, neg_weight, sample_weight, validation_mask=None, random_state=None, ): """Fit a single binary classifier. The i'th class is considered the "positive" class. Parameters ---------- est : Estimator object The estimator to fit i : int Index of the positive class X : numpy array or sparse matrix of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples, ] Target values alpha : float The regularization parameter C : float Maximum step size for passive aggressive learning_rate : str The learning rate. Accepted values are 'constant', 'optimal', 'invscaling', 'pa1' and 'pa2'. max_iter : int The maximum number of iterations (epochs) pos_weight : float The weight of the positive class neg_weight : float The weight of the negative class sample_weight : numpy array of shape [n_samples, ] The weight of each sample validation_mask : numpy array of shape [n_samples, ], default=None Precomputed validation mask in case _fit_binary is called in the context of a one-vs-rest reduction. random_state : int, RandomState instance, default=None If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
""" # if average is not true, average_coef, and average_intercept will be # unused y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary( est, y, i ) assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] random_state = check_random_state(random_state) dataset, intercept_decay = make_dataset( X, y_i, sample_weight, random_state=random_state ) penalty_type = est._get_penalty_type(est.penalty) learning_rate_type = est._get_learning_rate_type(learning_rate) if validation_mask is None: validation_mask = est._make_validation_split(y_i) classes = np.array([-1, 1], dtype=y_i.dtype) validation_score_cb = est._make_validation_score_cb( validation_mask, X, y_i, sample_weight, classes=classes ) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(MAX_INT) tol = est.tol if est.tol is not None else -np.inf coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd( coef, intercept, average_coef, average_intercept, est.loss_function_, penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask, est.early_stopping, validation_score_cb, int(est.n_iter_no_change), max_iter, tol, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, 0, est.t_, intercept_decay, est.average, ) if est.average: if len(est.classes_) == 2: est._average_intercept[0] = average_intercept else: est._average_intercept[i] = average_intercept return coef, intercept, n_iter_ class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta): # TODO(1.3): Remove "log"" loss_functions = { "hinge": (Hinge, 1.0), "squared_hinge": (SquaredHinge, 1.0), "perceptron": (Hinge, 0.0), "log_loss": (Log,), "log": (Log,), "modified_huber": (ModifiedHuber,), "squared_error": (SquaredLoss,), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } _parameter_constraints: dict = { **BaseSGD._parameter_constraints, "loss": [StrOptions(set(loss_functions), deprecated={"log"})], "early_stopping": ["boolean"], "validation_fraction": [Interval(Real, 0, 1, closed="neither")], "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], "n_jobs": [Integral, None], "class_weight": [StrOptions({"balanced"}), dict, None], } @abstractmethod def __init__( self, loss="hinge", *, penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, ) self.class_weight = class_weight self.n_jobs = n_jobs def _partial_fit( self, X, y, alpha, C, loss, learning_rate, max_iter, classes, sample_weight, coef_init, intercept_init, ): first_call = not hasattr(self, "classes_") X, y = self._validate_data( X, y, accept_sparse="csr", dtype=np.float64, order="C", accept_large_sparse=False, 
reset=first_call, ) n_samples, n_features = X.shape _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight( self.class_weight, classes=self.classes_, y=y ) sample_weight = _check_sample_weight(sample_weight, X) if getattr(self, "coef_", None) is None or coef_init is not None: self._allocate_parameter_mem( n_classes, n_features, coef_init, intercept_init ) elif n_features != self.coef_.shape[-1]: raise ValueError( "Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1]) ) self.loss_function_ = self._get_loss_function(loss) if not hasattr(self, "t_"): self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass( X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, max_iter=max_iter, ) elif n_classes == 2: self._fit_binary( X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, max_iter=max_iter, ) else: raise ValueError( "The number of classes has to be greater than one; got %d class" % n_classes ) return self def _fit( self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None, ): if hasattr(self, "classes_"): # delete the attribute otherwise _partial_fit thinks it's not the first call delattr(self, "classes_") # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class y = self._validate_data(y=y) classes = np.unique(y) if self.warm_start and hasattr(self, "coef_"): if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self._standard_coef = self.coef_ self._standard_intercept = self.intercept_ self._average_coef = None self._average_intercept = None # Clear iteration count for multiple call to fit. self.t_ = 1.0 self._partial_fit( X, y, alpha, C, loss, learning_rate, self.max_iter, classes, sample_weight, coef_init, intercept_init, ) if ( self.tol is not None and self.tol > -np.inf and self.n_iter_ == self.max_iter ): warnings.warn( "Maximum number of iteration reached before " "convergence. Consider increasing max_iter to " "improve the fit.", ConvergenceWarning, ) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter): """Fit a binary classifier on X and y.""" coef, intercept, n_iter_ = fit_binary( self, 1, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight, random_state=self.random_state, ) self.t_ += n_iter_ * X.shape[0] self.n_iter_ = n_iter_ # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self._average_coef.reshape(1, -1) self.intercept_ = self._average_intercept else: self.coef_ = self._standard_coef.reshape(1, -1) self._standard_intercept = np.atleast_1d(intercept) self.intercept_ = self._standard_intercept else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OvA (One versus All) or OvR (One versus Rest). 
""" # Precompute the validation split using the multiclass labels # to ensure proper balancing of the classes. validation_mask = self._make_validation_split(y) # Use joblib to fit OvA in parallel. # Pick the random seed for each job outside of fit_binary to avoid # sharing the estimator random state between threads which could lead # to non-deterministic behavior random_state = check_random_state(self.random_state) seeds = random_state.randint(MAX_INT, size=len(self.classes_)) result = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem" )( delayed(fit_binary)( self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1.0, sample_weight, validation_mask=validation_mask, random_state=seed, ) for i, seed in enumerate(seeds) ) # take the maximum of n_iter_ over every binary fit n_iter_ = 0.0 for i, (_, intercept, n_iter_i) in enumerate(result): self.intercept_[i] = intercept n_iter_ = max(n_iter_, n_iter_i) self.t_ += n_iter_ * X.shape[0] self.n_iter_ = n_iter_ if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self._average_coef self.intercept_ = self._average_intercept else: self.coef_ = self._standard_coef self._standard_intercept = np.atleast_1d(self.intercept_) self.intercept_ = self._standard_intercept def partial_fit(self, X, y, classes=None, sample_weight=None): """Perform one epoch of stochastic gradient descent on given samples. Internally, this method uses ``max_iter = 1``. Therefore, it is not guaranteed that a minimum of the cost function is reached after calling it once. Matters such as objective convergence, early stopping, and learning rate adjustments should be handled by the user. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data. y : ndarray of shape (n_samples,) Subset of the target values. classes : ndarray of shape (n_classes,), default=None Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns an instance of self. """ if not hasattr(self, "classes_"): self._validate_params() self._more_validate_params(for_partial_fit=True) if self.class_weight == "balanced": raise ValueError( "class_weight '{0}' is not supported for " "partial_fit. In order to use 'balanced' weights," " use compute_class_weight('{0}', " "classes=classes, y=y). " "In place of y you can use a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. " "Pass the resulting weights as the class_weight " "parameter.".format(self.class_weight) ) return self._partial_fit( X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None, ) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. 
coef_init : ndarray of shape (n_classes, n_features), default=None The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,), default=None The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified. Returns ------- self : object Returns an instance of self. """ self._validate_params() self._more_validate_params() return self._fit( X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight, ) class SGDClassifier(BaseSGDClassifier): """Linear classifiers (SVM, logistic regression, etc.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning via the `partial_fit` method. For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : {'hinge', 'log_loss', 'log', 'modified_huber', 'squared_hinge',\ 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\ 'squared_epsilon_insensitive'}, default='hinge' The loss function to be used. - 'hinge' gives a linear SVM. - 'log_loss' gives logistic regression, a probabilistic classifier. - 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. - 'squared_hinge' is like hinge but is quadratically penalized. - 'perceptron' is the linear loss used by the perceptron algorithm. - The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and 'squared_epsilon_insensitive' are designed for regression but can be useful in classification as well; see :class:`~sklearn.linear_model.SGDRegressor` for a description. More details about the losses formulas can be found in the :ref:`User Guide <sgd_mathematical_formulation>`. .. deprecated:: 1.1 The loss 'log' was deprecated in v1.1 and will be removed in version 1.3. Use `loss='log_loss'` which is equivalent. penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. No penalty is added when set to `None`. alpha : float, default=0.0001 Constant that multiplies the regularization term. The higher the value, the stronger the regularization. 
Also used to compute the learning rate when set to `learning_rate` is set to 'optimal'. Values must be in the range `[0.0, inf)`. l1_ratio : float, default=0.15 The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Only used if `penalty` is 'elasticnet'. Values must be in the range `[0.0, 1.0]`. fit_intercept : bool, default=True Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. max_iter : int, default=1000 The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the :meth:`partial_fit` method. Values must be in the range `[1, inf)`. .. versionadded:: 0.19 tol : float or None, default=1e-3 The stopping criterion. If it is not None, training will stop when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive epochs. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. Values must be in the range `[0.0, inf)`. .. versionadded:: 0.19 shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. verbose : int, default=0 The verbosity level. Values must be in the range `[0, inf)`. epsilon : float, default=0.1 Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. Values must be in the range `[0.0, inf)`. n_jobs : int, default=None The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance, default=None Used for shuffling the data, when ``shuffle`` is set to ``True``. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Integer values must be in the range `[0, 2**32 - 1]`. learning_rate : str, default='optimal' The learning rate schedule: - 'constant': `eta = eta0` - 'optimal': `eta = 1.0 / (alpha * (t + t0))` where `t0` is chosen by a heuristic proposed by Leon Bottou. - 'invscaling': `eta = eta0 / pow(t, power_t)` - 'adaptive': `eta = eta0`, as long as the training keeps decreasing. Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if `early_stopping` is `True`, the current learning rate is divided by 5. .. versionadded:: 0.20 Added 'adaptive' option eta0 : float, default=0.0 The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. Values must be in the range `(0.0, inf)`. power_t : float, default=0.5 The exponent for inverse scaling learning rate [default 0.5]. Values must be in the range `(-inf, inf)`. early_stopping : bool, default=False Whether to use early stopping to terminate training when validation score is not improving. 
If set to `True`, it will automatically set aside a stratified fraction of training data as validation and terminate training when validation score returned by the `score` method is not improving by at least tol for n_iter_no_change consecutive epochs. .. versionadded:: 0.20 Added 'early_stopping' option validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if `early_stopping` is True. Values must be in the range `(0.0, 1.0)`. .. versionadded:: 0.20 Added 'validation_fraction' option n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before stopping fitting. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. Integer values must be in the range `[1, max_iter)`. .. versionadded:: 0.20 Added 'n_iter_no_change' option class_weight : dict, {class_label: weight} or "balanced", default=None Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. average : bool or int, default=False When set to `True`, computes the averaged SGD weights across all updates and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches `average`. So ``average=10`` will begin averaging after seeing 10 samples. Integer values must be in the range `[1, n_samples]`. Attributes ---------- coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ (n_classes, n_features) Weights assigned to the features. intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. n_iter_ : int The actual number of iterations before reaching the stopping criterion. For multiclass fits, it is the maximum over every binary fit. loss_function_ : concrete ``LossFunction`` classes_ : array of shape (n_classes,) t_ : int Number of weight updates performed during training. Same as ``(n_iter_ * n_samples + 1)``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.svm.LinearSVC : Linear support vector classification. LogisticRegression : Logistic regression. Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)``. 
Examples -------- >>> import numpy as np >>> from sklearn.linear_model import SGDClassifier >>> from sklearn.preprocessing import StandardScaler >>> from sklearn.pipeline import make_pipeline >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> # Always scale the input. The most convenient way is to use a pipeline. >>> clf = make_pipeline(StandardScaler(), ... SGDClassifier(max_iter=1000, tol=1e-3)) >>> clf.fit(X, Y) Pipeline(steps=[('standardscaler', StandardScaler()), ('sgdclassifier', SGDClassifier())]) >>> print(clf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { **BaseSGDClassifier._parameter_constraints, "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], "alpha": [Interval(Real, 0, None, closed="left")], "l1_ratio": [Interval(Real, 0, 1, closed="both")], "power_t": [Interval(Real, None, None, closed="neither")], "epsilon": [Interval(Real, 0, None, closed="left")], "learning_rate": [ StrOptions({"constant", "optimal", "invscaling", "adaptive"}), Hidden(StrOptions({"pa1", "pa2"})), ], "eta0": [Interval(Real, 0, None, closed="left")], } def __init__( self, loss="hinge", *, penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, class_weight=class_weight, warm_start=warm_start, average=average, ) def _check_proba(self): # TODO(1.3): Remove "log" if self.loss not in ("log_loss", "log", "modified_huber"): raise AttributeError( "probability estimates are not available for loss=%r" % self.loss ) return True @available_if(_check_proba) def predict_proba(self, X): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`~sklearn.calibration.CalibratedClassifierCV` instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data for prediction. Returns ------- ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, https://dl.acm.org/doi/pdf/10.1145/775047.775151 The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ check_is_fitted(self) # TODO(1.3): Remove "log" if self.loss in ("log_loss", "log"): return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = len(self.classes_) == 2 scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1.0 prob /= 2.0 if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = prob_sum == 0 if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError( "predict_(log_)proba only supported when" " loss='log_loss' or loss='modified_huber' " "(%r given)" % self.loss ) @available_if(_check_proba) def predict_log_proba(self, X): """Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data for prediction. Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
""" return np.log(self.predict_proba(X)) def _more_tags(self): return { "_xfail_checks": { "check_sample_weights_invariance": ( "zero sample_weight is not equivalent to removing samples" ), } } class BaseSGDRegressor(RegressorMixin, BaseSGD): loss_functions = { "squared_error": (SquaredLoss,), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } _parameter_constraints: dict = { **BaseSGD._parameter_constraints, "loss": [StrOptions(set(loss_functions))], "early_stopping": ["boolean"], "validation_fraction": [Interval(Real, 0, 1, closed="neither")], "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], } @abstractmethod def __init__( self, loss="squared_error", *, penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, ) def _partial_fit( self, X, y, alpha, C, loss, learning_rate, max_iter, sample_weight, coef_init, intercept_init, ): first_call = getattr(self, "coef_", None) is None X, y = self._validate_data( X, y, accept_sparse="csr", copy=False, order="C", dtype=np.float64, accept_large_sparse=False, reset=first_call, ) y = y.astype(np.float64, copy=False) n_samples, n_features = X.shape sample_weight = _check_sample_weight(sample_weight, X) # Allocate datastructures from input arguments if first_call: self._allocate_parameter_mem(1, n_features, coef_init, intercept_init) if self.average > 0 and getattr(self, "_average_coef", None) is None: self._average_coef = np.zeros(n_features, dtype=np.float64, order="C") self._average_intercept = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor( X, y, alpha, C, loss, learning_rate, sample_weight, max_iter ) return self def partial_fit(self, X, y, sample_weight=None): """Perform one epoch of stochastic gradient descent on given samples. Internally, this method uses ``max_iter = 1``. Therefore, it is not guaranteed that a minimum of the cost function is reached after calling it once. Matters such as objective convergence and early stopping should be handled by the user. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data. y : numpy array of shape (n_samples,) Subset of target values. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns an instance of self. 
""" if not hasattr(self, "coef_"): self._validate_params() self._more_validate_params(for_partial_fit=True) return self._partial_fit( X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None, ) def _fit( self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None, ): if self.warm_start and getattr(self, "coef_", None) is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = 1.0 self._partial_fit( X, y, alpha, C, loss, learning_rate, self.max_iter, sample_weight, coef_init, intercept_init, ) if ( self.tol is not None and self.tol > -np.inf and self.n_iter_ == self.max_iter ): warnings.warn( "Maximum number of iteration reached before " "convergence. Consider increasing max_iter to " "improve the fit.", ConvergenceWarning, ) return self def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. coef_init : ndarray of shape (n_features,), default=None The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (1,), default=None The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Fitted `SGDRegressor` estimator. """ self._validate_params() self._more_validate_params() return self._fit( X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight, ) def _decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- ndarray of shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self) X = self._validate_data(X, accept_sparse="csr", reset=False) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- ndarray of shape (n_samples,) Predicted target values per element in X. 
""" return self._decision_function(X) def _fit_regressor( self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter ): loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if not hasattr(self, "t_"): self.t_ = 1.0 validation_mask = self._make_validation_split(y) validation_score_cb = self._make_validation_score_cb( validation_mask, X, y, sample_weight ) random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, MAX_INT) dataset, intercept_decay = make_dataset( X, y, sample_weight, random_state=random_state ) tol = self.tol if self.tol is not None else -np.inf if self.average: coef = self._standard_coef intercept = self._standard_intercept average_coef = self._average_coef average_intercept = self._average_intercept else: coef = self.coef_ intercept = self.intercept_ average_coef = None # Not used average_intercept = [0] # Not used coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( coef, intercept[0], average_coef, average_intercept[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, 0, self.t_, intercept_decay, self.average, ) self.t_ += self.n_iter_ * X.shape[0] if self.average > 0: self._average_intercept = np.atleast_1d(average_intercept) self._standard_intercept = np.atleast_1d(intercept) if self.average <= self.t_ - 1.0: # made enough updates for averaging to be taken into account self.coef_ = average_coef self.intercept_ = np.atleast_1d(average_intercept) else: self.coef_ = coef self.intercept_ = np.atleast_1d(intercept) else: self.intercept_ = np.atleast_1d(intercept) class SGDRegressor(BaseSGDRegressor): """Linear model fitted by minimizing a regularized empirical loss with SGD. SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, default='squared_error' The loss function to be used. The possible values are 'squared_error', 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' The 'squared_error' refers to the ordinary least squares fit. 'huber' modifies 'squared_error' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. 
More details about the losses formulas can be found in the :ref:`User Guide <sgd_mathematical_formulation>`. penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. No penalty is added when set to `None`. alpha : float, default=0.0001 Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when set to `learning_rate` is set to 'optimal'. l1_ratio : float, default=0.15 The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Only used if `penalty` is 'elasticnet'. fit_intercept : bool, default=True Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. max_iter : int, default=1000 The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the :meth:`partial_fit` method. .. versionadded:: 0.19 tol : float or None, default=1e-3 The stopping criterion. If it is not None, training will stop when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive epochs. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. .. versionadded:: 0.19 shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. verbose : int, default=0 The verbosity level. epsilon : float, default=0.1 Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. random_state : int, RandomState instance, default=None Used for shuffling the data, when ``shuffle`` is set to ``True``. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. learning_rate : str, default='invscaling' The learning rate schedule: - 'constant': `eta = eta0` - 'optimal': `eta = 1.0 / (alpha * (t + t0))` where t0 is chosen by a heuristic proposed by Leon Bottou. - 'invscaling': `eta = eta0 / pow(t, power_t)` - 'adaptive': eta = eta0, as long as the training keeps decreasing. Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if early_stopping is True, the current learning rate is divided by 5. .. versionadded:: 0.20 Added 'adaptive' option eta0 : float, default=0.01 The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.01. power_t : float, default=0.25 The exponent for inverse scaling learning rate. early_stopping : bool, default=False Whether to use early stopping to terminate training when validation score is not improving. If set to True, it will automatically set aside a fraction of training data as validation and terminate training when validation score returned by the `score` method is not improving by at least `tol` for `n_iter_no_change` consecutive epochs. .. 
versionadded:: 0.20 Added 'early_stopping' option validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if `early_stopping` is True. .. versionadded:: 0.20 Added 'validation_fraction' option n_iter_no_change : int, default=5 Number of iterations with no improvement to wait before stopping fitting. Convergence is checked against the training loss or the validation loss depending on the `early_stopping` parameter. .. versionadded:: 0.20 Added 'n_iter_no_change' option warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. average : bool or int, default=False When set to True, computes the averaged SGD weights across all updates and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches `average`. So ``average=10`` will begin averaging after seeing 10 samples. Attributes ---------- coef_ : ndarray of shape (n_features,) Weights assigned to the features. intercept_ : ndarray of shape (1,) The intercept term. n_iter_ : int The actual number of iterations before reaching the stopping criterion. t_ : int Number of weight updates performed during training. Same as ``(n_iter_ * n_samples + 1)``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- HuberRegressor : Linear regression model that is robust to outliers. Lars : Least Angle Regression model. Lasso : Linear Model trained with L1 prior as regularizer. RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. Ridge : Linear least squares with l2 regularization. sklearn.svm.SVR : Epsilon-Support Vector Regression. TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. Examples -------- >>> import numpy as np >>> from sklearn.linear_model import SGDRegressor >>> from sklearn.pipeline import make_pipeline >>> from sklearn.preprocessing import StandardScaler >>> n_samples, n_features = 10, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples) >>> X = rng.randn(n_samples, n_features) >>> # Always scale the input. The most convenient way is to use a pipeline. >>> reg = make_pipeline(StandardScaler(), ... 
SGDRegressor(max_iter=1000, tol=1e-3)) >>> reg.fit(X, y) Pipeline(steps=[('standardscaler', StandardScaler()), ('sgdregressor', SGDRegressor())]) """ _parameter_constraints: dict = { **BaseSGDRegressor._parameter_constraints, "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], "alpha": [Interval(Real, 0, None, closed="left")], "l1_ratio": [Interval(Real, 0, 1, closed="both")], "power_t": [Interval(Real, None, None, closed="neither")], "learning_rate": [ StrOptions({"constant", "optimal", "invscaling", "adaptive"}), Hidden(StrOptions({"pa1", "pa2"})), ], "epsilon": [Interval(Real, 0, None, closed="left")], "eta0": [Interval(Real, 0, None, closed="left")], } def __init__( self, loss="squared_error", *, penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False, ): super().__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=early_stopping, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, warm_start=warm_start, average=average, ) def _more_tags(self): return { "_xfail_checks": { "check_sample_weights_invariance": ( "zero sample_weight is not equivalent to removing samples" ), } } class SGDOneClassSVM(BaseSGD, OutlierMixin): """Solves linear One-Class SVM using Stochastic Gradient Descent. This implementation is meant to be used with a kernel approximation technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by default. Read more in the :ref:`User Guide <sgd_online_one_class_svm>`. .. versionadded:: 1.0 Parameters ---------- nu : float, default=0.5 The nu parameter of the One Class SVM: an upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken. fit_intercept : bool, default=True Whether the intercept should be estimated or not. Defaults to True. max_iter : int, default=1000 The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the ``fit`` method, and not the `partial_fit`. Defaults to 1000. tol : float or None, default=1e-3 The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol). Defaults to 1e-3. shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. Defaults to True. verbose : int, default=0 The verbosity level. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal' The learning rate schedule to use with `fit`. (If using `partial_fit`, learning rate must be controlled directly). 
- 'constant': `eta = eta0` - 'optimal': `eta = 1.0 / (alpha * (t + t0))` where t0 is chosen by a heuristic proposed by Leon Bottou. - 'invscaling': `eta = eta0 / pow(t, power_t)` - 'adaptive': eta = eta0, as long as the training keeps decreasing. Each time n_iter_no_change consecutive epochs fail to decrease the training loss by tol or fail to increase validation score by tol if early_stopping is True, the current learning rate is divided by 5. eta0 : float, default=0.0 The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules. The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : float, default=0.5 The exponent for inverse scaling learning rate [default 0.5]. warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. Repeatedly calling fit or partial_fit when warm_start is True can result in a different solution than when calling fit a single time because of the way the data is shuffled. If a dynamic learning rate is used, the learning rate is adapted depending on the number of samples already seen. Calling ``fit`` resets this counter, while ``partial_fit`` will result in increasing the existing counter. average : bool or int, default=False When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10`` will begin averaging after seeing 10 samples. Attributes ---------- coef_ : ndarray of shape (1, n_features) Weights assigned to the features. offset_ : ndarray of shape (1,) Offset used to define the decision function from the raw scores. We have the relation: decision_function = score_samples - offset. n_iter_ : int The actual number of iterations to reach the stopping criterion. t_ : int Number of weight updates performed during training. Same as ``(n_iter_ * n_samples + 1)``. loss_function_ : concrete ``LossFunction`` n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. Notes ----- This estimator has a linear complexity in the number of training samples and is thus better suited than the `sklearn.svm.OneClassSVM` implementation for datasets with a large number of training samples (say > 10,000). 
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> clf = linear_model.SGDOneClassSVM(random_state=42) >>> clf.fit(X) SGDOneClassSVM(random_state=42) >>> print(clf.predict([[4, 4]])) [1] """ loss_functions = {"hinge": (Hinge, 1.0)} _parameter_constraints: dict = { **BaseSGD._parameter_constraints, "nu": [Interval(Real, 0.0, 1.0, closed="right")], "learning_rate": [ StrOptions({"constant", "optimal", "invscaling", "adaptive"}), Hidden(StrOptions({"pa1", "pa2"})), ], "eta0": [Interval(Real, 0, None, closed="left")], "power_t": [Interval(Real, None, None, closed="neither")], } def __init__( self, nu=0.5, fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True, verbose=0, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, warm_start=False, average=False, ): self.nu = nu super(SGDOneClassSVM, self).__init__( loss="hinge", penalty="l2", C=1.0, l1_ratio=0, fit_intercept=fit_intercept, max_iter=max_iter, tol=tol, shuffle=shuffle, verbose=verbose, epsilon=DEFAULT_EPSILON, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=warm_start, average=average, ) def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter): """Uses SGD implementation with X and y=np.ones(n_samples).""" # The One-Class SVM uses the SGD implementation with # y=np.ones(n_samples). n_samples = X.shape[0] y = np.ones(n_samples, dtype=np.float64, order="C") dataset, offset_decay = make_dataset(X, y, sample_weight) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) # early stopping is set to False for the One-Class SVM. thus # validation_mask and validation_score_cb will be set to values # associated to early_stopping=False in _make_validation_split and # _make_validation_score_cb respectively. validation_mask = self._make_validation_split(y) validation_score_cb = self._make_validation_score_cb( validation_mask, X, y, sample_weight ) random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) tol = self.tol if self.tol is not None else -np.inf one_class = 1 # There are no class weights for the One-Class SVM and they are # therefore set to 1. 
pos_weight = 1 neg_weight = 1 if self.average: coef = self._standard_coef intercept = self._standard_intercept average_coef = self._average_coef average_intercept = self._average_intercept else: coef = self.coef_ intercept = 1 - self.offset_ average_coef = None # Not used average_intercept = [0] # Not used coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( coef, intercept[0], average_coef, average_intercept[0], self.loss_function_, penalty_type, alpha, C, self.l1_ratio, dataset, validation_mask, self.early_stopping, validation_score_cb, int(self.n_iter_no_change), max_iter, tol, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, neg_weight, pos_weight, learning_rate_type, self.eta0, self.power_t, one_class, self.t_, offset_decay, self.average, ) self.t_ += self.n_iter_ * n_samples if self.average > 0: self._average_intercept = np.atleast_1d(average_intercept) self._standard_intercept = np.atleast_1d(intercept) if self.average <= self.t_ - 1.0: # made enough updates for averaging to be taken into account self.coef_ = average_coef self.offset_ = 1 - np.atleast_1d(average_intercept) else: self.coef_ = coef self.offset_ = 1 - np.atleast_1d(intercept) else: self.offset_ = 1 - np.atleast_1d(intercept) def _partial_fit( self, X, alpha, C, loss, learning_rate, max_iter, sample_weight, coef_init, offset_init, ): first_call = getattr(self, "coef_", None) is None X = self._validate_data( X, None, accept_sparse="csr", dtype=np.float64, order="C", accept_large_sparse=False, reset=first_call, ) n_features = X.shape[1] # Allocate datastructures from input arguments sample_weight = _check_sample_weight(sample_weight, X) # We use intercept = 1 - offset where intercept is the intercept of # the SGD implementation and offset is the offset of the One-Class SVM # optimization problem. if getattr(self, "coef_", None) is None or coef_init is not None: self._allocate_parameter_mem(1, n_features, coef_init, offset_init, 1) elif n_features != self.coef_.shape[-1]: raise ValueError( "Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1]) ) if self.average and getattr(self, "_average_coef", None) is None: self._average_coef = np.zeros(n_features, dtype=np.float64, order="C") self._average_intercept = np.zeros(1, dtype=np.float64, order="C") self.loss_function_ = self._get_loss_function(loss) if not hasattr(self, "t_"): self.t_ = 1.0 # delegate to concrete training procedure self._fit_one_class( X, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, max_iter=max_iter, ) return self def partial_fit(self, X, y=None, sample_weight=None): """Fit linear One-Class SVM with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns a fitted instance of self. 
""" if not hasattr(self, "coef_"): self._validate_params() self._more_validate_params(for_partial_fit=True) alpha = self.nu / 2 return self._partial_fit( X, alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, offset_init=None, ) def _fit( self, X, alpha, C, loss, learning_rate, coef_init=None, offset_init=None, sample_weight=None, ): if self.warm_start and hasattr(self, "coef_"): if coef_init is None: coef_init = self.coef_ if offset_init is None: offset_init = self.offset_ else: self.coef_ = None self.offset_ = None # Clear iteration count for multiple call to fit. self.t_ = 1.0 self._partial_fit( X, alpha, C, loss, learning_rate, self.max_iter, sample_weight, coef_init, offset_init, ) if ( self.tol is not None and self.tol > -np.inf and self.n_iter_ == self.max_iter ): warnings.warn( "Maximum number of iteration reached before " "convergence. Consider increasing max_iter to " "improve the fit.", ConvergenceWarning, ) return self def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None): """Fit linear One-Class SVM with Stochastic Gradient Descent. This solves an equivalent optimization problem of the One-Class SVM primal optimization problem and returns a weight vector w and an offset rho such that the decision function is given by <w, x> - rho. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : Ignored Not used, present for API consistency by convention. coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. offset_init : array, shape (n_classes,) The initial offset to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified. Returns ------- self : object Returns a fitted instance of self. """ self._validate_params() self._more_validate_params() alpha = self.nu / 2 self._fit( X, alpha=alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, offset_init=offset_init, sample_weight=sample_weight, ) return self def decision_function(self, X): """Signed distance to the separating hyperplane. Signed distance is positive for an inlier and negative for an outlier. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- dec : array-like, shape (n_samples,) Decision function values of the samples. """ check_is_fitted(self, "coef_") X = self._validate_data(X, accept_sparse="csr", reset=False) decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_ return decisions.ravel() def score_samples(self, X): """Raw scoring function of the samples. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- score_samples : array-like, shape (n_samples,) Unshiffted scoring function values of the samples. """ score_samples = self.decision_function(X) + self.offset_ return score_samples def predict(self, X): """Return labels (1 inlier, -1 outlier) of the samples. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- y : array, shape (n_samples,) Labels of the samples. 
""" y = (self.decision_function(X) >= 0).astype(np.int32) y[y == 0] = -1 # for consistency with outlier detectors return y def _more_tags(self): return { "_xfail_checks": { "check_sample_weights_invariance": ( "zero sample_weight is not equivalent to removing samples" ) } }
bsd-3-clause
makcedward/nlpaug
docs/conf.py
1
5657
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # nlpaug documentation build configuration file, created by # sphinx-quickstart on Wed Aug 7 07:37:05 2019. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import sys, os from unittest.mock import MagicMock sys.path.append(os.path.abspath('..')) # Mock module to bypass pip install class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() MOCK_MODULES = [ 'librosa', 'librosa.display', 'numpy', 'nltk', 'matplotlib', 'matplotlib.pyplot', 'setuptools', 'python-dotenv', 'nltk.corpus', 'torch', 'transformers', 'pandas'] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'nlpaug' copyright = '2019, Edward Ma' author = 'Edward Ma' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.1.11' # The full version, including alpha/beta/rc tags. release = '1.1.11' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'nlpaugdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'nlpaug.tex', 'nlpaug Documentation', 'Edward Ma', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'nlpaug', 'nlpaug Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'nlpaug', 'nlpaug Documentation', author, 'nlpaug', 'One line description of project.', 'Miscellaneous'), ]
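# ---------------------------------------------------------------------------
# Editorial note: the Mock / MOCK_MODULES block near the top of this file is
# what lets autodoc import nlpaug on a docs build machine that has none of the
# heavy runtime dependencies installed. The small helper below is a hedged
# sketch of how that mechanism can be sanity-checked; the function name is
# ours and it is never invoked by Sphinx, so it has no effect on the build.
def _check_mocked_imports():
    # `import torch` resolves to the Mock instance registered in sys.modules
    # above, so it succeeds even when torch is not installed.
    import torch
    assert isinstance(torch, MagicMock)
    # Attribute chains and calls on a mocked module keep returning MagicMock
    # objects, which is enough for autodoc to introspect nlpaug's modules
    # without raising ImportError.
    assert isinstance(torch.nn.Linear(3, 3), MagicMock)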
mit
zhmxu/nyu_ml_lectures
fetch_data.py
20
2545
import os try: from urllib.request import urlopen except ImportError: from urllib import urlopen import zipfile SENTIMENT140_URL = ("http://cs.stanford.edu/people/alecmgo/" "trainingandtestdata.zip") SENTIMENT140_ARCHIVE_NAME = "trainingandtestdata.zip" def get_datasets_folder(): here = os.path.dirname(__file__) notebooks = os.path.join(here, 'notebooks') datasets_folder = os.path.abspath(os.path.join(notebooks, 'datasets')) datasets_archive = os.path.abspath(os.path.join(notebooks, 'datasets.zip')) if not os.path.exists(datasets_folder): if os.path.exists(datasets_archive): print("Extracting " + datasets_archive) zf = zipfile.ZipFile(datasets_archive) zf.extractall('.') assert os.path.exists(datasets_folder) else: print("Creating datasets folder: " + datasets_folder) os.makedirs(datasets_folder) else: print("Using existing dataset folder:" + datasets_folder) return datasets_folder def check_sentiment140(datasets_folder): print("Checking availability of the sentiment 140 dataset") archive_path = os.path.join(datasets_folder, SENTIMENT140_ARCHIVE_NAME) sentiment140_path = os.path.join(datasets_folder, 'sentiment140') train_path = os.path.join(sentiment140_path, 'training.1600000.processed.noemoticon.csv') test_path = os.path.join(sentiment140_path, 'testdata.manual.2009.06.14.csv') if not os.path.exists(sentiment140_path): if not os.path.exists(archive_path): print("Downloading dataset from %s (77MB)" % SENTIMENT140_URL) opener = urlopen(SENTIMENT140_URL) open(archive_path, 'wb').write(opener.read()) else: print("Found archive: " + archive_path) print("Extracting %s to %s" % (archive_path, sentiment140_path)) zf = zipfile.ZipFile(archive_path) zf.extractall(sentiment140_path) print("Checking that the sentiment 140 CSV files exist...") assert os.path.exists(train_path) assert os.path.exists(test_path) print("=> Success!") if __name__ == "__main__": datasets_folder = get_datasets_folder() check_sentiment140(datasets_folder) print("Loading Labeled Faces Data (~200MB)") from sklearn.datasets import fetch_lfw_people fetch_lfw_people(min_faces_per_person=70, resize=0.4, data_home=datasets_folder) print("=> Success!")
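# ---------------------------------------------------------------------------
# Editorial sketch (not called by this script): once check_sentiment140() has
# extracted the archive, the training CSV can be loaded with pandas. The
# helper name below is ours, and the column names and latin-1 encoding reflect
# the usual sentiment140 layout (polarity, id, date, query, user, text); treat
# them as assumptions and adjust if the extracted files differ.
def load_sentiment140_train(datasets_folder):
    import pandas as pd
    train_path = os.path.join(datasets_folder, 'sentiment140',
                              'training.1600000.processed.noemoticon.csv')
    columns = ['polarity', 'tweet_id', 'date', 'query', 'user', 'text']
    # The file ships without a header row, hence the explicit column names.
    return pd.read_csv(train_path, names=columns, encoding='latin-1')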
cc0-1.0
groutr/numpy
numpy/lib/function_base.py
5
143332
from __future__ import division, absolute_import, print_function import warnings import sys import collections import operator import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10 ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import _insert, add_docstring from numpy.core.multiarray import digitize, bincount, interp as compiled_interp from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring # Force range to be a generator, for np.delete's usage. if sys.version_info[0] < 3: range = xrange __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : {0, 1} Return 1 if the object has an iterator method or is a sequence, and 0 otherwise. Examples -------- >>> np.iterable([1, 2, 3]) 1 >>> np.iterable(2) 0 """ try: iter(y) except: return 0 return 1 def _hist_optim_numbins_estimator(a, estimator): """ A helper function to be called from histogram to deal with estimating optimal number of bins estimator: str If estimator is one of ['auto', 'fd', 'scott', 'rice', 'sturges'] this function will choose the appropriate estimator and return it's estimate for the optimal number of bins. """ assert isinstance(estimator, basestring) # private function should not be called otherwise if a.size == 0: return 1 def sturges(x): """ Sturges Estimator A very simplistic estimator based on the assumption of normality of the data Poor performance for non-normal data, especially obvious for large X. Depends only on size of the data. """ return np.ceil(np.log2(x.size)) + 1 def rice(x): """ Rice Estimator Another simple estimator, with no normality assumption. It has better performance for large data, but tends to overestimate number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal) Depends only on size of the data """ return np.ceil(2 * x.size ** (1.0 / 3)) def scott(x): """ Scott Estimator The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal) """ h = 3.5 * x.std() * x.size ** (-1.0 / 3) if h > 0: return np.ceil(x.ptp() / h) return 1 def fd(x): """ Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth Considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. 
However the IQR depends on fewer points than the sd so it is less accurate, especially for long tailed distributions. If the IQR is 0, we return 1 for the number of bins. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal) """ iqr = np.subtract(*np.percentile(x, [75, 25])) if iqr > 0: h = (2 * iqr * x.size ** (-1.0 / 3)) return np.ceil(x.ptp() / h) # If iqr is 0, default number of bins is 1 return 1 def auto(x): """ The FD estimator is usually the most robust method, but it tends to be too small for small X. The Sturges estimator is quite good for small (<1000) datasets and is the default in R. This method gives good off the shelf behaviour. """ return max(fd(x), sturges(x)) optimal_numbins_methods = {'sturges': sturges, 'rice': rice, 'scott': scott, 'fd': fd, 'auto': auto} try: estimator_func = optimal_numbins_methods[estimator.lower()] except KeyError: raise ValueError("{0} not a valid method for `bins`".format(estimator)) else: # these methods return floats, np.histogram requires an int return int(estimator_func(a)) def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): """ Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string from the list below, `histogram` will use the method chosen to calculate the optimal number of bins (see Notes for more detail on the estimators). For visualisation, we suggest using the 'auto' option. 'auto' Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance 'fd' (Freedman Diaconis Estimator) Robust (resilient to outliers) estimator that takes into account data variability and data size . 'scott' Less robust estimator that that takes into account data variability and data size. 'rice' Estimator does not take variability into account, only data size. Commonly overestimates number of bins required. 'sturges' R's default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. normed : bool, optional This keyword is deprecated in Numpy 1.6 due to confusing/buggy behavior. It will be removed in Numpy 2.0. Use the density keyword instead. If False, the result will contain the number of samples in each bin. If True, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that this latter behavior is known to be buggy with unequal bin widths; use `density` instead. weights : array_like, optional An array of weights, of the same shape as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `normed` is True, the weights are normalized, so that the integral of the density over the range remains 1 density : bool, optional If False, the result will contain the number of samples in each bin. 
If True, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the `normed` keyword if given. Returns ------- hist : array The values of the histogram. See `normed` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd, bincount, searchsorted, digitize Notes ----- All but the last (righthand-most) bin is half-open. In other words, if `bins` is:: [1, 2, 3, 4] then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 1.11.0 The methods to estimate the optimal number of bins are well found in literature, and are inspired by the choices R provides for histogram visualisation. Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal, which is why it appears in most estimators. These are simply plug-in methods that give good starting points for number of bins. In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins 'Auto' (maximum of the 'Sturges' and 'FD' estimators) A compromise to get a good value. For small datasets the sturges value will usually be chosen, while larger datasets will usually default to FD. Avoids the overly conservative behaviour of FD and Sturges for small and large datasets respectively. Switchover point is usually x.size~1000. 'FD' (Freedman Diaconis Estimator) .. math:: h = 2 \\frac{IQR}{n^{-1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' .. math:: h = \\frac{3.5\\sigma}{n^{-1/3}} The binwidth is proportional to the standard deviation (sd) of the data and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The sd is not very robust to outliers. Values are very similar to the Freedman Diaconis Estimator in the absence of outliers. 'Rice' .. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil The number of bins is only proportional to cube root of a.size. It tends to overestimate the number of bins and it does not take into account data variability. 'Sturges' .. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil The number of bins is the base2 log of a.size. This estimator assumes normality of data and is too conservative for larger, non-normal datasets. This is the default method in R's `hist` method. Examples -------- >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist*np.diff(bin_edges)) 1.0 .. 
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000))) >>> plt.hist(a, bins = 'auto') # plt.hist passes it's arguments to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if isinstance(bins, basestring): bins = _hist_optim_numbins_estimator(a, bins) # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. range = (0, 1) else: range = (a.min(), a.max()) mn, mx = [mi + 0.0 for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, np.complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram n = np.zeros(bins, ntype) # Pre-compute histogram scaling factor norm = bins / (mx - mn) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
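        # Within each block the values are shifted by `mn` and scaled by
        # `norm = bins / (mx - mn)`, so truncating with astype(np.intp) gives
        # the bin index directly and np.bincount accumulates the counts.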
for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= mn) keep &= (tmp_a <= mx) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a = tmp_a.astype(float) tmp_a -= mn tmp_a *= norm # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) indices[indices == bins] -= 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) else: n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) # We now compute the bin edges since these are returned bins = linspace(mn, mx, bins + 1, endpoint=True) else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError( 'bins must increase monotonically.') # Initialize empty histogram n = np.zeros(bins.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for Numpy 2.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. edges : list A list of D arrays describing the bin edges for each dimension. 
See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise AttributeError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). 
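    # np.digitize assigned out-of-range samples to the extra leading and
    # trailing (outlier) bins, so slicing each axis with 1:-1 keeps only the
    # regular bins.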
core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which to average `a`. If `None`, averaging is done over the flattened array. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. """ if not isinstance(a, np.matrix): a = np.asarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: a = a + 0.0 wgt = np.asarray(weights) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype)) if (scl == 0.0).any(): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt).sum(axis)/scl if returned: scl = np.multiply(avg, 0) + scl return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. 
Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print 'ValueError' ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray The input domain. condlist : list of bool arrays Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., lambda=1)``, then each function is called as ``f(x, lambda=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. 
See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if condlist.shape[-1] != 1: condlist = condlist.T if n == n2 - 1: # compute the "otherwise" condition. totlist = np.logical_or.reduce(condlist, axis=0) condlist = np.vstack([condlist, ~totlist]) n += 1 if (n != n2): raise ValueError( "function list and condition list must be the same") y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. default : scalar, optional The element inserted in `output` when all conditions evaluate to False. Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. 
Doing it seperatly optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:ndarray.copy are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. Notes ----- This is equivalent to >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences in the interior and either first differences or second order accurate one-sides (forward or backwards) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : scalar or list of scalar, optional N scalars specifying the sample distances for each dimension, i.e. `dx`, `dy`, `dz`, ... Default distance: 1. single scalar specifies sample distance for all dimensions. if `axis` is given, the number of varargs must equal the number of axes. edge_order : {1, 2}, optional Gradient is calculated using N\ :sup:`th` order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. 
.. versionadded:: 1.11.0 Returns ------- gradient : list of ndarray Each element of `list` has the same shape as `f` giving the derivative of `f` with respect to each dimension. Examples -------- >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(x, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] >>> x = np.array([0, 1, 2, 3, 4]) >>> dx = np.gradient(x) >>> y = x**2 >>> np.gradient(y, dx, edge_order=2) array([-0., 2., 4., 6., 8.]) The axis keyword can be used to specify a subset of axes of which the gradient is calculated >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) """ f = np.asanyarray(f) N = len(f.shape) # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) # check axes to have correct type and no duplicate entries if isinstance(axes, int): axes = (axes,) if not isinstance(axes, tuple): raise TypeError("A tuple of integers or a single integer is required") # normalize axis values: axes = tuple(x + N if x < 0 else x for x in axes) if max(axes) >= N or min(axes) < 0: raise ValueError("'axis' entry is out of bounds") if len(set(axes)) != len(axes): raise ValueError("duplicate value in 'axis'") n = len(varargs) if n == 0: dx = [1.0]*N elif n == 1: dx = [varargs[0]]*N elif n == len(axes): dx = list(varargs) else: raise SyntaxError( "invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: otype = 'd' # Difference of datetime64 elements results in timedelta64 if otype == 'M': # Need to use the full dtype name because it contains unit information otype = f.dtype.name.replace('datetime', 'timedelta') elif otype == 'm': # Needs to keep the specific units, can't be a general unit otype = f.dtype # Convert datetime64 data into ints. Make dummy variable `y` # that is a view of ints if the data is datetime64, otherwise # just set y equal to the the array `f`. 
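# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _gradient_stencil_demo is hypothetical. For a 1-D array with
# unit spacing and the default edge_order=1, np.gradient uses the symmetric
# difference (y[i+1] - y[i-1]) / 2 in the interior and one-sided first
# differences at the two ends, as the implementation below spells out.
def _gradient_stencil_demo():
    import numpy as np
    y = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0])
    manual = np.empty_like(y)
    manual[1:-1] = (y[2:] - y[:-2]) / 2.0   # central differences, interior
    manual[0] = y[1] - y[0]                 # one-sided forward difference
    manual[-1] = y[-1] - y[-2]              # one-sided backward difference
    assert np.allclose(manual, np.gradient(y))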
if f.dtype.char in ["M", "m"]: y = f.view('int64') else: y = f for i, axis in enumerate(axes): if y.shape[axis] < 2: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least two elements are required.") # Numerical differentiation: 1st order edges, 2nd order interior if y.shape[axis] == 2 or edge_order == 1: # Use first order differences for time data out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 # 1D equivalent -- out[0] = (y[1] - y[0]) out[slice1] = (y[slice2] - y[slice3]) slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 # 1D equivalent -- out[-1] = (y[-1] - y[-2]) out[slice1] = (y[slice2] - y[slice3]) # Numerical differentiation: 2st order edges, 2nd order interior else: # Use second order differences where possible out = np.empty_like(y, dtype=otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 out[slice1] = (y[slice2] - y[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 slice4[axis] = -3 # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 # divide by step size out /= dx[i] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len(axes) == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. . See Also -------- gradient, ediff1d, cumsum Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = len(a.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) if n > 1: return diff(a[slice1]-a[slice2], n-1, axis=axis) else: return a[slice1]-a[slice2] def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. 
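# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _diff_recursion_demo is hypothetical. diff() above handles
# n > 1 by recursing on the first difference, so higher-order differences are
# literally repeated first differences, each one element shorter along `axis`.
def _diff_recursion_demo():
    import numpy as np
    x = np.array([1, 2, 4, 7, 0])
    assert np.array_equal(np.diff(x, n=2), np.diff(np.diff(x)))
    assert np.diff(x).shape == (x.shape[0] - 1,)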
xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of floats The y-coordinates of the data points, same length as `xp`. left : float, optional Value to return for `x < xp[0]`, default is `fp[0]`. right : float, optional Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. versionadded:: 1.10.0 Returns ------- y : float or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) """ if period is None: if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return compiled_interp([x], xp, fp, left, right).item() else: return compiled_interp(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=np.float64) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return compiled_interp(x, xp, fp, left, right) else: return compiled_interp(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
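# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _interp_clamp_demo is hypothetical. Outside the data range,
# np.interp clamps to fp[0] / fp[-1] unless `left` / `right` are given, and
# interpolates linearly between the surrounding data points otherwise.
def _interp_clamp_demo():
    import numpy as np
    xp = [1.0, 2.0, 3.0]
    fp = [3.0, 2.0, 0.0]
    assert np.interp(0.0, xp, fp) == 3.0                  # left of data -> fp[0]
    assert np.interp(4.0, xp, fp) == 0.0                  # right of data -> fp[-1]
    assert np.interp(4.0, xp, fp, right=-99.0) == -99.0   # explicit fill value
    assert np.interp(2.5, xp, fp) == 1.0                  # halfway between (2, 2) and (3, 0)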
, 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = len(p.shape) dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. 
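# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _angle_unwrap_demo is hypothetical. angle() above reduces to
# arctan2(imag, real) (times 180/pi when deg is set), and unwrap() above
# replaces absolute jumps larger than `discont` by their 2*pi complement.
def _angle_unwrap_demo():
    import numpy as np
    z = np.array([1.0, 1.0j, 1.0 + 1.0j])
    assert np.allclose(np.angle(z), np.arctan2(z.imag, z.real))
    phase = np.linspace(0, np.pi, num=5)
    phase[3:] += np.pi
    unwrapped = np.unwrap(phase)
    assert np.all(np.abs(np.diff(unwrapped)) <= np.pi)   # no jump exceeds pi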
""" try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : array_like Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N it will be repeated. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return class vectorize(object): """ vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns a numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. 
The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. 
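# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _vectorize_otypes_demo is hypothetical (myfunc mirrors the
# docstring example above). Supplying `otypes` fixes the output dtype up
# front, so vectorize does not need to call the function on the first element
# just to infer it (see the Notes above).
def _vectorize_otypes_demo():
    import numpy as np

    def myfunc(a, b):
        return a - b if a > b else a + b

    vfunc = np.vectorize(myfunc, otypes=[np.float64])
    out = vfunc([1, 2, 3, 4], 2)
    assert out.dtype == np.float64
    assert np.array_equal(out, np.array([3.0, 4.0, 1.0, 2.0]))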
""" def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False): self.pyfunc = pyfunc self.cache = cache self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): self.otypes = otypes for char in self.otypes: if char not in typecodes['All']: raise ValueError( "Invalid otype specified: %s" % (char,)) elif iterable(otypes): self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) else: raise ValueError( "Invalid otype specification") # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) inputs = [asarray(_a).flat[0] for _a in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if not args: _res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(_a, copy=False, subok=True, dtype=object) for _a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: _res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: _res = tuple([array(_x, copy=False, subok=True, dtype=_t) for _x, _t in zip(outputs, otypes)]) return _res def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : int, optional If `rowvar` is non-zero (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : int, optional Default normalization is by ``(N - 1)``, where ``N`` corresponds to the number of observations given (unbiased estimate). If `bias` is 1, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer freguency weights; the number of times each observation vector should be repeated. .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. .. versionadded:: 1.10 Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Notes ----- Assume that the observations are in the columns of the observation array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` as it should. 
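# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _weighted_cov_demo is hypothetical. It replays the weighted
# covariance recipe from the Notes above (with ddof=1, the unbiased default)
# and compares it against np.cov called with fweights and aweights.
def _weighted_cov_demo():
    import numpy as np
    obs = np.array([[0.1, 1.2, 2.3, 3.1],
                    [2.0, 1.0, 0.5, -0.5]])      # 2 variables, 4 observations
    f = np.array([1.0, 2.0, 1.0, 3.0])           # frequency weights (integer-valued)
    a = np.array([0.5, 1.0, 1.5, 1.0])           # reliability weights
    ddof = 1
    w = f * a
    v1 = np.sum(w)
    v2 = np.sum(w * a)
    m = obs - np.sum(obs * w, axis=1, keepdims=True) / v1
    manual = np.dot(m * w, m.T) * v1 / (v1 ** 2 - ddof * v2)
    assert np.allclose(manual, np.cov(obs, fweights=f.astype(int), aweights=a))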
Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative. Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.vstack((x,y)) >>> print np.cov(X) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print np.cov(x, y) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print np.cov(x) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if rowvar == 0 and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if rowvar == 0 and y.shape[0] != 1: y = y.T X = np.vstack((X, y)) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=np.float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=np.float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : int, optional If `rowvar` is non-zero (default), then each row represents a variable, with observations in the columns. 
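# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _corrcoef_from_cov_demo is hypothetical. It checks the relation
# quoted above, R_ij = C_ij / sqrt(C_ii * C_jj), which is how the corrcoef()
# implementation below rescales the covariance matrix row by row.
def _corrcoef_from_cov_demo():
    import numpy as np
    x = np.array([-2.1, -1.0, 4.3])
    y = np.array([3.0, 1.1, 0.12])
    c = np.cov(x, y)
    d = np.sqrt(np.diag(c))
    assert np.allclose(c / np.outer(d, d), np.corrcoef(x, y))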
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c d = sqrt(d) # calculate "c / multiply.outer(d, d)" row-wise ... for memory and speed for i in range(0, d.size): c[i,:] /= (d * d[i]) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
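# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _blackman_formula_demo is hypothetical. np.blackman is the
# three-term cosine sum 0.42 - 0.5*cos(2*pi*n/(M-1)) + 0.08*cos(4*pi*n/(M-1)),
# matching the implementation below; hanning and hamming further down follow
# the same pattern with different coefficients, while bartlett is the
# triangular analogue.
def _blackman_formula_demo():
    import numpy as np
    M = 12
    n = np.arange(M)
    manual = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
                   + 0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
    assert np.allclose(manual, np.blackman(M))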
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. 
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
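# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _chbevl_convention_demo is hypothetical. _chbevl above is a
# Clenshaw-style evaluation of a Chebyshev series at x/2, with coefficients
# stored highest order first and the constant term entering with weight 1/2
# (the cephes convention used by the _i0A/_i0B tables). Cross-checked here
# against numpy.polynomial.chebyshev.chebval on arbitrary test coefficients.
def _chbevl_convention_demo():
    import numpy as np
    from numpy.polynomial import chebyshev as cheb
    vals = [0.25, -1.0, 0.5, 2.0]
    x = 1.3
    b0, b1, b2 = vals[0], 0.0, 0.0
    for v in vals[1:]:                 # same recurrence as _chbevl above
        b2, b1 = b1, b0
        b0 = x * b1 - b2 + v
    clenshaw = 0.5 * (b0 - b2)
    c = np.array(vals[::-1], dtype=float)
    c[0] *= 0.5                        # constant term counts half
    assert np.isclose(clenshaw, cheb.chebval(x / 2.0, c))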
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
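# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _kaiser_formula_demo is hypothetical. The Kaiser window is
# i0(beta * sqrt(1 - ((n - alpha)/alpha)**2)) / i0(beta) with alpha = (M-1)/2,
# matching the implementation above.
def _kaiser_formula_demo():
    import numpy as np
    M, beta = 12, 14.0
    n = np.arange(M)
    alpha = (M - 1) / 2.0
    manual = np.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2)) / np.i0(beta)
    assert np.allclose(manual, np.kaiser(M, beta))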
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function Kapable of receiving an axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim try: axis = operator.index(axis) if axis >= nd or axis < -nd: raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) keepdim[axis] = 1 except TypeError: sax = set() for x in axis: if x >= nd or x < -nd: raise IndexError("axis %d out of bounds (%d)" % (x, nd)) if x in sax: raise ValueError("duplicate value in axis") sax.add(x % nd) keepdim[x] = 1 keep = sax.symmetric_difference(frozenset(range(nd))) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 else: keepdim = [1] * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. 
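# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _sinc_limit_demo is hypothetical. sinc() above sidesteps the
# 0/0 at x == 0 by substituting a tiny value for zero, so np.sinc(0) returns
# the limit value 1 while nonzero integers map to (numerically) zero.
def _sinc_limit_demo():
    import numpy as np
    assert np.sinc(0) == 1.0
    assert np.allclose(np.sinc(np.array([1.0, 2.0, -3.0])), 0.0)
    x = 0.5
    assert np.isclose(np.sinc(x), np.sin(np.pi * x) / (np.pi * x))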
axis : int or sequence of int, optional Axis along which the medians are computed. The default (axis=None) is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to median. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. Note that, if `overwrite_input` is True and the input is not already an ndarray, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result (unless `out` is specified, in which case that array is returned instead). If the input contains integers, or floats of smaller precision than 64, then the output data-type is float64. Otherwise, the output data-type is the same as that of the input. See Also -------- mean, percentile Notes ----- Given a vector V of length N, the median of V is the middle value of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is odd. When N is even, it is the average of the two middle values of ``V_sorted``. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) part = np.rollaxis(part, axis, part.ndim) n = np.isnan(part[..., 
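# --- Editor's note: illustrative sketch, not part of the original source; the
# helper name _median_partition_demo is hypothetical. For even N the median is
# the mean of the two middle order statistics; _median above obtains them with
# a partial sort (np.partition on the two middle indices) rather than a full
# sort.
def _median_partition_demo():
    import numpy as np
    a = np.array([10.0, 7.0, 4.0, 3.0, 2.0, 1.0])
    kth = [a.size // 2 - 1, a.size // 2]
    part = np.partition(a, kth)
    assert 0.5 * (part[kth[0]] + part[kth[1]]) == np.median(a)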
-1]) if rout.ndim == 0: if n == True: warnings.warn("Invalid value encountered in median", RuntimeWarning) if out is not None: out[...] = a.dtype.type(np.nan) rout = out else: rout = a.dtype.type(np.nan) elif np.count_nonzero(n.ravel()) > 0: warnings.warn("Invalid value encountered in median for" + " %d results" % np.count_nonzero(n.ravel()), RuntimeWarning) rout[n] = np.nan return rout else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array. return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute which must be between 0 and 100 inclusive. axis : int or sequence of int, optional Axis along which the percentiles are computed. The default (None) is to compute the percentiles along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to percentile. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the content of the passed in array `a` after this function completes -- treat it as undefined. Default is False. Note that, if the `a` input is not already an array this parameter will have no effect, `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If a single percentile `q` is given and axis=None a scalar is returned. If multiple percentiles `q` are given an array holding the result is returned. The results are listed in the first axis. (If `out` is specified, in which case that array is returned instead). If the input contains integers, or floats of smaller precision than 64, then the output data-type is float64. Otherwise, the output data-type is the same as that of the input. See Also -------- mean, median Notes ----- Given a vector V of length N, the q-th percentile of V is the q-th ranked value in a sorted copy of V. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match q exactly. 
This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) array([ 3.5]) >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=m) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([[ 7.], [ 2.]]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.percentile(b, 50, axis=None, overwrite_input=True) array([ 3.5]) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: if q.ndim == 0: return r.reshape(k) else: return r.reshape([len(q)] + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = floor(indices) + 0.5 elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.rollaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = 
np.rollaxis(ap, axis, 0) weights_below = np.rollaxis(weights_below, axis, 0) weights_above = np.rollaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.rollaxis(x1, axis, 0) x2 = np.rollaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in median", RuntimeWarning) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional If `x` is None, then spacing between all `y` elements is `dx`. dx : scalar, optional If `x` is None, spacing given by `dx` is assumed. Default is 1. axis : int, optional Specify the axis. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
""" try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. 
>>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(xi)] shape = [x.size for x in output] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + (1,)*(ndim - 2) output[1].shape = (-1, 1) + (1,)*(ndim - 2) shape[0], shape[1] = shape[1], shape[0] if sparse: if copy_: return [x.copy() for x in output] else: return output else: # Return the full N-D matrix (not only the 1-D vector) if copy_: mult_fact = np.ones(shape, dtype=int) return [x * mult_fact for x in output] else: return np.broadcast_arrays(*output) def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. 
Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning) if wrap: return wrap(arr) else: return arr.copy() slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy()) else: return arr.copy() # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arr.flags.fnc) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn( "in the future insert will treat boolean arrays and array-likes " "as boolean index instead of casting it to integer", FutureWarning) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 else: if ndim > 0 and (axis < -ndim or axis >= ndim): raise IndexError( "axis %i is out of bounds for an array of " "dimension %i" % (axis, ndim)) if (axis < 0): axis += ndim if (ndim == 0): # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning) arr = arr.copy() arr[...] 
= values if wrap: return wrap(arr) else: return arr slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.rollaxis(values, 0, (axis % values.ndim) + 1) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arr.flags.fnc) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: arrays must have same number of dimensions """ arr = asanyarray(arr) if axis is None: if arr.ndim != 1: arr = arr.ravel() values = ravel(values) axis = arr.ndim-1 return concatenate((arr, values), axis=axis)
bsd-3-clause
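The median and percentile implementations in the NumPy file above both route through _ureduce so that keepdims and multi-axis reductions are handled uniformly. A minimal usage sketch of the public entry points, assuming a NumPy installation that ships this function_base module; the array values are illustrative only and the results noted in the comments follow the docstrings above:

import numpy as np

a = np.array([[10, 7, 4],
              [3, 2, 1]])

# median over the flattened array, over one axis, and with keepdims
np.median(a)                          # 3.5
np.median(a, axis=0)                  # array([ 6.5,  4.5,  2.5])
np.median(a, axis=1, keepdims=True)   # array([[ 7.], [ 2.]])

# percentile with the interpolation modes handled in _percentile
np.percentile(a, 50)                            # median of the flattened array
np.percentile(a, [25, 50, 75], axis=1)          # results listed along the first axis
np.percentile(a, 40, interpolation='nearest')   # snapped to an actual data value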
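trapz and meshgrid from the same file compose naturally: meshgrid builds coordinate arrays (sparse ones, if asked) and trapz integrates along a chosen axis. A short sketch under the same assumption of a standard NumPy install; the grid and integrand are arbitrary examples:

import numpy as np

# trapezoidal integration with uniform and explicit spacing
np.trapz([1, 2, 3])                # 4.0
np.trapz([1, 2, 3], x=[4, 6, 8])   # 8.0
np.trapz([1, 2, 3], dx=2)          # 8.0

# sparse meshgrid keeps memory low; broadcasting fills in the full grid
x = np.linspace(-1.0, 1.0, 5)
y = np.linspace(-1.0, 1.0, 3)
xx, yy = np.meshgrid(x, y, sparse=True)   # shapes (1, 5) and (3, 1)
z = xx**2 + yy**2                         # broadcasts to shape (3, 5)

# approximate the double integral of z over the grid by integrating twice
area = np.trapz(np.trapz(z, x, axis=1), y)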
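delete, insert and append above all return a new array rather than modifying in place, and the delete notes point out that a boolean mask is often the cheaper tool. A small sketch of all three plus the mask alternative, again assuming a standard NumPy install:

import numpy as np

arr = np.arange(1, 13).reshape(3, 4)

# remove the middle row, then every other column
no_mid_row = np.delete(arr, 1, axis=0)
odd_cols = np.delete(arr, np.s_[::2], axis=1)

# the boolean-mask equivalent of np.delete(arr, [0, 2], axis=0)
mask = np.ones(arr.shape[0], dtype=bool)
mask[[0, 2]] = False
kept_rows = arr[mask]

# insert a column of 99s before column 1, then append one more row
with_col = np.insert(arr, 1, 99, axis=1)
with_row = np.append(arr, [[13, 14, 15, 16]], axis=0)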
slundberg/shap
tests/explainers/common.py
1
5184
import tempfile import numpy as np import pytest import shap def basic_xgboost_scenario(max_samples=None, dataset=shap.datasets.adult): """ Create a basic XGBoost model on a data set. """ xgboost = pytest.importorskip('xgboost') # get a dataset on income prediction X, y = dataset() if max_samples is not None: X = X.iloc[:max_samples] y = y[:max_samples] X = X.values # train an XGBoost model (but any other model type would also work) model = xgboost.XGBClassifier() model.fit(X, y) return model, X def basic_translation_scenario(): """ Create a basic transformers translation model and tokenizer. """ AutoTokenizer = pytest.importorskip("transformers").AutoTokenizer AutoModelForSeq2SeqLM = pytest.importorskip("transformers").AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es") # define the input sentences we want to translate data = [ "In this picture, there are four persons: my father, my mother, my brother and my sister.", "Transformers have rapidly become the model of choice for NLP problems, replacing older recurrent neural network models" ] return model, tokenizer, data def test_additivity(explainer_type, model, masker, data, **kwargs): """ Test explainer and masker for additivity on a single output prediction problem. """ explainer = explainer_type(model, masker, **kwargs) shap_values = explainer(data) # a multi-output additivity check if len(shap_values.shape) == 3: # this works with ragged arrays and for models that we can't call directly (they get auto-wrapped) for i in range(shap_values.shape[0]): row = shap_values[i] if callable(explainer.masker.shape): all_on_masked = explainer.masker(np.ones(explainer.masker.shape(data[i])[1], dtype=np.bool), data[i]) else: all_on_masked = explainer.masker(np.ones(explainer.masker.shape[1], dtype=np.bool), data[i]) if not isinstance(all_on_masked, tuple): all_on_masked = (all_on_masked,) out = explainer.model(*all_on_masked) assert np.max(np.abs(row.base_values + row.values.sum(0) - out) < 1e6) else: assert np.max(np.abs(shap_values.base_values + shap_values.values.sum(1) - model(data)) < 1e6) def test_interactions_additivity(explainer_type, model, masker, data, **kwargs): """ Test explainer and masker for additivity on a single output prediction problem. """ explainer = explainer_type(model, masker, **kwargs) shap_values = explainer(data, interactions=True) assert np.max(np.abs(shap_values.base_values + shap_values.values.sum((1, 2)) - model(data)) < 1e6) # def test_multi_class(explainer_type, model, masker, data, **kwargs): # """ Test explainer and masker for additivity on a multi-class prediction problem. # """ # explainer_kwargs = {k: kwargs[k] for k in kwargs if k in ["algorithm"]} # explainer = explainer_type(model.predict_proba, masker, **explainer_kwargs) # shap_values = explainer(data) # assert np.max(np.abs(shap_values.base_values + shap_values.values.sum(1) - model.predict_proba(data)) < 1e6) # def test_interactions(explainer_type): # """ Check that second order interactions have additivity. 
# """ # model, X = basic_xgboost(100) # # build an Exact explainer and explain the model predictions on the given dataset # explainer = explainer_type(model.predict, X) # shap_values = explainer(X, interactions=True) # assert np.max(np.abs(shap_values.base_values + shap_values.values.sum((1, 2)) - model.predict(X[:100])) < 1e6) def test_serialization(explainer_type, model, masker, data, rtol=1e-05, atol=1e-8, **kwargs): """ Test serialization with a given explainer algorithm. """ explainer_kwargs = {k: v for k,v in kwargs.items() if k in ["algorithm"]} explainer_original = explainer_type(model, masker, **explainer_kwargs) shap_values_original = explainer_original(data[:1]) # Serialization with tempfile.TemporaryFile() as temp_serialization_file: save_kwargs = {k: v for k,v in kwargs.items() if k in ["model_saver", "masker_saver"]} explainer_original.save(temp_serialization_file, **save_kwargs) # Deserialization temp_serialization_file.seek(0) load_kwargs = {k: v for k,v in kwargs.items() if k in ["model_loader", "masker_loader"]} explainer_new = explainer_type.load(temp_serialization_file, **load_kwargs) call_kwargs = {k: v for k,v in kwargs.items() if k in ["max_evals"]} shap_values_new = explainer_new(data[:1], **call_kwargs) assert np.allclose(shap_values_original.base_values, shap_values_new.base_values, rtol=rtol, atol=atol) assert np.allclose(shap_values_original[0].values, shap_values_new[0].values, rtol=rtol, atol=atol) assert isinstance(explainer_original, type(explainer_new)) assert isinstance(explainer_original.masker, type(explainer_new.masker))
mit
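The helpers in this shap test module build a small model and then verify the additivity (efficiency) property: the base value plus the summed attributions should reproduce the output of the explained function. A standalone sketch of that check, assuming shap and xgboost are installed and that shap.datasets.adult() can download its data; shap.explainers.Exact is chosen here purely for illustration and is not necessarily the explainer the test suite exercises:

import numpy as np
import shap
import xgboost

# small model on the income-prediction data, mirroring basic_xgboost_scenario above
X, y = shap.datasets.adult()
X, y = X.iloc[:100].values, y[:100]
model = xgboost.XGBClassifier().fit(X, y)

# explain the model's hard predictions on a handful of rows
explainer = shap.explainers.Exact(model.predict, X)
shap_values = explainer(X[:5])

# additivity: base value + summed attributions reproduces the explained output
reconstructed = shap_values.base_values + shap_values.values.sum(1)
assert np.allclose(reconstructed, model.predict(X[:5]), atol=1e-4)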
kyleniemeyer/PyTeCK
pyteck/__main__.py
1
3466
from argparse import ArgumentParser import multiprocessing from .eval_model import evaluate_model parser = ArgumentParser(description='PyTeCK: Evaluate ' 'performance of kinetic models using ' 'experimental ignition delay data.' ) parser.add_argument('-m', '--model', type=str, required=True, help='Input model filename (e.g., mech.cti).' ) parser.add_argument('-k', '--model-keys', type=str, dest='model_keys_file', required=True, help='YAML file with keys for species in models.' ) parser.add_argument('-d', '--dataset', type=str, required=True, help='Filename for list of datasets.' ) parser.add_argument('-dp', '--data-path', type=str, dest='data_path', required=False, default='data', help='Local directory holding dataset files.' ) parser.add_argument('-mp', '--model-path', type=str, dest='model_path', required=False, default='models', help='Local directory holding model files.' ) parser.add_argument('-rp', '--results-path', type=str, dest='results_path', required=False, default='results', help='Local directory holding result HDF5 files.' ) parser.add_argument('-v', '--model-variant', type=str, dest='model_variant_file', required=False, help='YAML with variants for models for, e.g., bath ' 'gases and pressures.' ) parser.add_argument('-nt', '--num-threads', type=int, dest='num_threads', default=multiprocessing.cpu_count()-1 or 1, required=False, help='The number of threads to use to run simulations in ' 'parallel.' ) parser.add_argument('-p', '--print', dest='print_results', action='store_true', default=False, help='Print model evaluation results to screen.' ) parser.add_argument('--restart', dest='restart', action='store_true', default=False, help='Reuse prior results files, and only calculate new ones.' ) parser.add_argument('--skip-validation', dest='skip_validation', action='store_true', default=False, help='Skips ChemKED file validation.' ) args = parser.parse_args() evaluate_model(args.model, args.model_keys_file, args.dataset, args.data_path, args.model_path, args.results_path, args.model_variant_file, args.num_threads, args.print_results, args.restart, args.skip_validation, )
mit
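The __main__ module above is a thin argparse front end that forwards the parsed options to evaluate_model positionally, in the order shown at the bottom of the file. A hypothetical programmatic call with placeholder file names (none of these paths come from the package; they only illustrate the positional order):

from pyteck.eval_model import evaluate_model

# same positional order as the call in __main__.py above;
# every file name here is a placeholder for your own inputs
evaluate_model(
    'mech.cti',          # -m / --model
    'model_keys.yaml',   # -k / --model-keys
    'dataset_list.txt',  # -d / --dataset
    'data',              # -dp / --data-path
    'models',            # -mp / --model-path
    'results',           # -rp / --results-path
    None,                # -v / --model-variant (optional)
    4,                   # -nt / --num-threads
    True,                # -p / --print
    False,               # --restart
    False,               # --skip-validation
)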
ndingwall/scikit-learn
sklearn/utils/fixes.py
1
7209
"""Compatibility fixes for older version of python, numpy and scipy If you add content to this file, please give the version of the package at which the fixe is no longer needed. """ # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <fpedregosa@acm.org> # Lars Buitinck # # License: BSD 3 clause from functools import update_wrapper from distutils.version import LooseVersion import functools import numpy as np import scipy.sparse as sp import scipy import scipy.stats from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa from numpy.ma import MaskedArray as _MaskedArray # TODO: remove in 0.25 from .._config import config_context, get_config from .deprecation import deprecated try: from pkg_resources import parse_version # type: ignore except ImportError: # setuptools not installed parse_version = LooseVersion # type: ignore np_version = parse_version(np.__version__) sp_version = parse_version(scipy.__version__) if sp_version >= parse_version('1.4'): from scipy.sparse.linalg import lobpcg else: # Backport of lobpcg functionality from scipy 1.4.0, can be removed # once support for sp_version < parse_version('1.4') is dropped # mypy error: Name 'lobpcg' already defined (possibly by an import) from ..externals._lobpcg import lobpcg # type: ignore # noqa def _object_dtype_isnan(X): return X != X # TODO: replace by copy=False, when only scipy > 1.1 is supported. def _astype_copy_false(X): """Returns the copy=False parameter for {ndarray, csr_matrix, csc_matrix}.astype when possible, otherwise don't specify """ if sp_version >= parse_version('1.1') or not sp.issparse(X): return {'copy': False} else: return {} def _joblib_parallel_args(**kwargs): """Set joblib.Parallel arguments in a compatible way for 0.11 and 0.12+ For joblib 0.11 this maps both ``prefer`` and ``require`` parameters to a specific ``backend``. Parameters ---------- prefer : str in {'processes', 'threads'} or None Soft hint to choose the default backend if no specific backend was selected with the parallel_backend context manager. require : 'sharedmem' or None Hard condstraint to select the backend. If set to 'sharedmem', the selected backend will be single-host and thread-based even if the user asked for a non-thread based backend with parallel_backend. See joblib.Parallel documentation for more details """ import joblib if parse_version(joblib.__version__) >= parse_version('0.12'): return kwargs extra_args = set(kwargs.keys()).difference({'prefer', 'require'}) if extra_args: raise NotImplementedError('unhandled arguments %s with joblib %s' % (list(extra_args), joblib.__version__)) args = {} if 'prefer' in kwargs: prefer = kwargs['prefer'] if prefer not in ['threads', 'processes', None]: raise ValueError('prefer=%s is not supported' % prefer) args['backend'] = {'threads': 'threading', 'processes': 'multiprocessing', None: None}[prefer] if 'require' in kwargs: require = kwargs['require'] if require not in [None, 'sharedmem']: raise ValueError('require=%s is not supported' % require) if require == 'sharedmem': args['backend'] = 'threading' return args class loguniform(scipy.stats.reciprocal): """A class supporting log-uniform random variables. Parameters ---------- low : float The minimum value high : float The maximum value Methods ------- rvs(self, size=None, random_state=None) Generate log-uniform random variables The most useful method for Scikit-learn usage is highlighted here. 
For a full list, see `scipy.stats.reciprocal <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.reciprocal.html>`_. This list includes all functions of ``scipy.stats`` continuous distributions such as ``pdf``. Notes ----- This class generates values between ``low`` and ``high`` or low <= loguniform(low, high).rvs() <= high The logarithmic probability density function (PDF) is uniform. When ``x`` is a uniformly distributed random variable between 0 and 1, ``10**x`` are random variales that are equally likely to be returned. This class is an alias to ``scipy.stats.reciprocal``, which uses the reciprocal distribution: https://en.wikipedia.org/wiki/Reciprocal_distribution Examples -------- >>> from sklearn.utils.fixes import loguniform >>> rv = loguniform(1e-3, 1e1) >>> rvs = rv.rvs(random_state=42, size=1000) >>> rvs.min() # doctest: +SKIP 0.0010435856341129003 >>> rvs.max() # doctest: +SKIP 9.97403052786026 """ @deprecated( 'MaskedArray is deprecated in version 0.23 and will be removed in version ' '0.25. Use numpy.ma.MaskedArray instead.' ) class MaskedArray(_MaskedArray): pass # TODO: remove in 0.25 def _take_along_axis(arr, indices, axis): """Implements a simplified version of np.take_along_axis if numpy version < 1.15""" if np_version >= parse_version('1.15'): return np.take_along_axis(arr=arr, indices=indices, axis=axis) else: if axis is None: arr = arr.flatten() if not np.issubdtype(indices.dtype, np.intp): raise IndexError('`indices` must be an integer array') if arr.ndim != indices.ndim: raise ValueError( "`indices` and `arr` must have the same number of dimensions") shape_ones = (1,) * indices.ndim dest_dims = ( list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) ) # build a fancy index, consisting of orthogonal aranges, with the # requested index inserted at the right location fancy_index = [] for dim, n in zip(dest_dims, arr.shape): if dim is None: fancy_index.append(indices) else: ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] fancy_index.append(np.arange(n).reshape(ind_shape)) fancy_index = tuple(fancy_index) return arr[fancy_index] # remove when https://github.com/joblib/joblib/issues/1071 is fixed def delayed(function): """Decorator used to capture the arguments of a function.""" @functools.wraps(function) def delayed_function(*args, **kwargs): return _FuncWrapper(function), args, kwargs return delayed_function class _FuncWrapper: """"Load the global configuration before calling the function.""" def __init__(self, function): self.function = function self.config = get_config() update_wrapper(self, self.function) def __call__(self, *args, **kwargs): with config_context(**self.config): return self.function(*args, **kwargs)
bsd-3-clause
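Beyond the version plumbing, a few helpers in fixes.py are called directly elsewhere in the code base: loguniform for log-uniform hyperparameter sampling, _take_along_axis as a NumPy < 1.15 backport, and _joblib_parallel_args for joblib 0.11 compatibility. A short behavioral sketch; the leading-underscore names are internal to this scikit-learn version, so treat them as implementation details rather than public API:

import numpy as np
from sklearn.utils.fixes import (
    loguniform, _take_along_axis, _joblib_parallel_args)

# log-uniform random variable between 1e-3 and 1e1 (alias of scipy.stats.reciprocal)
rv = loguniform(1e-3, 1e1)
samples = rv.rvs(size=5, random_state=42)

# backport of np.take_along_axis: gather each row in its own sorted order
arr = np.array([[10, 30, 20],
                [60, 40, 50]])
order = np.argsort(arr, axis=1)
sorted_rows = _take_along_axis(arr, order, axis=1)   # [[10, 20, 30], [40, 50, 60]]

# on joblib >= 0.12 the kwargs pass through unchanged; on 0.11 prefer='threads'
# is mapped to backend='threading'
parallel_kwargs = _joblib_parallel_args(prefer='threads')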
ndingwall/scikit-learn
sklearn/random_projection.py
5
23301
# -*- coding: utf8 """Random Projection transformers. Random Projections are a simple and computationally efficient way to reduce the dimensionality of the data by trading a controlled amount of accuracy (as additional variance) for faster processing times and smaller model sizes. The dimensions and distribution of Random Projections matrices are controlled so as to preserve the pairwise distances between any two samples of the dataset. The main theoretical result behind the efficiency of random projection is the `Johnson-Lindenstrauss lemma (quoting Wikipedia) <https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_: In mathematics, the Johnson-Lindenstrauss lemma is a result concerning low-distortion embeddings of points from high-dimensional into low-dimensional Euclidean space. The lemma states that a small set of points in a high-dimensional space can be embedded into a space of much lower dimension in such a way that distances between the points are nearly preserved. The map used for the embedding is at least Lipschitz, and can even be taken to be an orthogonal projection. """ # Authors: Olivier Grisel <olivier.grisel@ensta.org>, # Arnaud Joly <a.joly@ulg.ac.be> # License: BSD 3 clause import warnings from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import BaseEstimator, TransformerMixin from .utils import check_random_state from .utils.extmath import safe_sparse_dot from .utils.random import sample_without_replacement from .utils.validation import check_array, check_is_fitted from .utils.validation import _deprecate_positional_args from .exceptions import DataDimensionalityWarning __all__ = ["SparseRandomProjection", "GaussianRandomProjection", "johnson_lindenstrauss_min_dim"] @_deprecate_positional_args def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): """Find a 'safe' number of components to randomly project to. The distortion introduced by a random projection `p` only changes the distance between two points by a factor (1 +- eps) in an euclidean space with good probability. The projection `p` is an eps-embedding as defined by: (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 Where u and v are any rows taken from a dataset of shape (n_samples, n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian N(0, 1) matrix of shape (n_components, n_features) (or a sparse Achlioptas matrix). The minimum number of components to guarantee the eps-embedding is given by: n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) Note that the number of dimensions is independent of the original number of features but instead depends on the size of the dataset: the larger the dataset, the higher is the minimal dimensionality of an eps-embedding. Read more in the :ref:`User Guide <johnson_lindenstrauss>`. Parameters ---------- n_samples : int or array-like of int Number of samples that should be a integer greater than 0. If an array is given, it will compute a safe number of components array-wise. eps : float or ndarray of shape (n_components,), dtype=float, \ default=0.1 Maximum distortion rate in the range (0,1 ) as defined by the Johnson-Lindenstrauss lemma. If an array is given, it will compute a safe number of components array-wise. Returns ------- n_components : int or ndarray of int The minimal number of components to guarantee with good probability an eps-embedding with n_samples. 
Examples -------- >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) 663 >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) array([ 663, 11841, 1112658]) >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) array([ 7894, 9868, 11841]) References ---------- .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma .. [2] Sanjoy Dasgupta and Anupam Gupta, 1999, "An elementary proof of the Johnson-Lindenstrauss Lemma." http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654 """ eps = np.asarray(eps) n_samples = np.asarray(n_samples) if np.any(eps <= 0.0) or np.any(eps >= 1): raise ValueError( "The JL bound is defined for eps in ]0, 1[, got %r" % eps) if np.any(n_samples) <= 0: raise ValueError( "The JL bound is defined for n_samples greater than zero, got %r" % n_samples) denominator = (eps ** 2 / 2) - (eps ** 3 / 3) return (4 * np.log(n_samples) / denominator).astype(int) def _check_density(density, n_features): """Factorize density check according to Li et al.""" if density == 'auto': density = 1 / np.sqrt(n_features) elif density <= 0 or density > 1: raise ValueError("Expected density in range ]0, 1], got: %r" % density) return density def _check_input_size(n_components, n_features): """Factorize argument checking for random matrix generation.""" if n_components <= 0: raise ValueError("n_components must be strictly positive, got %d" % n_components) if n_features <= 0: raise ValueError("n_features must be strictly positive, got %d" % n_features) def _gaussian_random_matrix(n_components, n_features, random_state=None): """Generate a dense Gaussian random matrix. The components of the random matrix are drawn from N(0, 1.0 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : ndarray of shape (n_components, n_features) The generated Gaussian random matrix. See Also -------- GaussianRandomProjection """ _check_input_size(n_components, n_features) rng = check_random_state(random_state) components = rng.normal(loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)) return components def _sparse_random_matrix(n_components, n_features, density='auto', random_state=None): """Generalized Achlioptas random sparse matrix for random projection. Setting density to 1 / 3 will yield the original matrix by Dimitris Achlioptas while setting a lower value will yield the generalization by Ping Li et al. If we note :math:`s = 1 / density`, the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. density : float or 'auto', default='auto' Ratio of non-zero component in the random projection matrix in the range `(0, 1]` If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). 
Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : {ndarray, sparse matrix} of shape (n_components, n_features) The generated Gaussian random matrix. Sparse matrix will be of CSR format. See Also -------- SparseRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", http://www.cs.ucsc.edu/~optas/papers/jl.pdf """ _check_input_size(n_components, n_features) density = _check_density(density, n_features) rng = check_random_state(random_state) if density == 1: # skip index generation if totally dense components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 return 1 / np.sqrt(n_components) * components else: # Generate location of non zero elements indices = [] offset = 0 indptr = [offset] for _ in range(n_components): # find the indices of the non-zero components for row i n_nonzero_i = rng.binomial(n_features, density) indices_i = sample_without_replacement(n_features, n_nonzero_i, random_state=rng) indices.append(indices_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) # Among non zero components the probability of the sign is 50%/50% data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 # build the CSR structure by concatenating the rows components = sp.csr_matrix((data, indices, indptr), shape=(n_components, n_features)) return np.sqrt(1 / density) / np.sqrt(n_components) * components class BaseRandomProjection(TransformerMixin, BaseEstimator, metaclass=ABCMeta): """Base class for random projections. Warning: This class should not be used directly. Use derived classes instead. """ @abstractmethod def __init__(self, n_components='auto', *, eps=0.1, dense_output=False, random_state=None): self.n_components = n_components self.eps = eps self.dense_output = dense_output self.random_state = random_state @abstractmethod def _make_random_matrix(self, n_components, n_features): """Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : {ndarray, sparse matrix} of shape \ (n_components, n_features) The generated random matrix. Sparse matrix will be of CSR format. """ def fit(self, X, y=None): """Generate a sparse random projection matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers. 
y Ignored Returns ------- self """ X = self._validate_data(X, accept_sparse=['csr', 'csc']) n_samples, n_features = X.shape if self.n_components == 'auto': self.n_components_ = johnson_lindenstrauss_min_dim( n_samples=n_samples, eps=self.eps) if self.n_components_ <= 0: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is invalid' % ( self.eps, n_samples, self.n_components_)) elif self.n_components_ > n_features: raise ValueError( 'eps=%f and n_samples=%d lead to a target dimension of ' '%d which is larger than the original space with ' 'n_features=%d' % (self.eps, n_samples, self.n_components_, n_features)) else: if self.n_components <= 0: raise ValueError("n_components must be greater than 0, got %s" % self.n_components) elif self.n_components > n_features: warnings.warn( "The number of components is higher than the number of" " features: n_features < n_components (%s < %s)." "The dimensionality of the problem will not be reduced." % (n_features, self.n_components), DataDimensionalityWarning) self.n_components_ = self.n_components # Generate a projection matrix of size [n_components, n_features] self.components_ = self._make_random_matrix(self.n_components_, n_features) # Check contract assert self.components_.shape == (self.n_components_, n_features), ( 'An error has occurred the self.components_ matrix has ' ' not the proper shape.') return self def transform(self, X): """Project the data by using matrix product with the random matrix Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data to project into a smaller dimensional space. Returns ------- X_new : {ndarray, sparse matrix} of shape (n_samples, n_components) Projected array. """ X = check_array(X, accept_sparse=['csr', 'csc']) check_is_fitted(self) if X.shape[1] != self.components_.shape[1]: raise ValueError( 'Impossible to perform projection:' 'X at fit stage had a different number of features. ' '(%s != %s)' % (X.shape[1], self.components_.shape[1])) X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) return X_new class GaussianRandomProjection(BaseRandomProjection): """Reduce dimensionality through Gaussian random projection. The components of the random matrix are drawn from N(0, 1 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when `n_components` is set to 'auto'. The value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : ndarray of shape (n_components, n_features) Random matrix used for the projection. Examples -------- >>> import numpy as np >>> from sklearn.random_projection import GaussianRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(100, 10000) >>> transformer = GaussianRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (100, 3947) See Also -------- SparseRandomProjection """ @_deprecate_positional_args def __init__(self, n_components='auto', *, eps=0.1, random_state=None): super().__init__( n_components=n_components, eps=eps, dense_output=True, random_state=random_state) def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : {ndarray, sparse matrix} of shape \ (n_components, n_features) The generated random matrix. Sparse matrix will be of CSR format. """ random_state = check_random_state(self.random_state) return _gaussian_random_matrix(n_components, n_features, random_state=random_state) class SparseRandomProjection(BaseRandomProjection): """Reduce dimensionality through sparse random projection. Sparse random matrix is an alternative to dense random projection matrix that guarantees similar embedding quality while being much more memory efficient and allowing faster computation of the projected data. If we note `s = 1 / density` the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. density : float or 'auto', default='auto' Ratio in the range (0, 1] of non-zero component in the random projection matrix. If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'. This value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. dense_output : bool, default=False If True, ensure that the output of the random projection is a dense numpy array even if the input and random projection matrix are both sparse. In practice, if the number of components is small the number of zero components in the projected data will be very small and it will be more CPU and memory efficient to use a dense representation. If False, the projected data uses a sparse representation if the input is sparse. 
random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : sparse matrix of shape (n_components, n_features) Random matrix used for the projection. Sparse matrix will be of CSR format. density_ : float in range 0.0 - 1.0 Concrete density computed from when density = "auto". Examples -------- >>> import numpy as np >>> from sklearn.random_projection import SparseRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(100, 10000) >>> transformer = SparseRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (100, 3947) >>> # very few components are non-zero >>> np.mean(transformer.components_ != 0) 0.0100... See Also -------- GaussianRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", https://users.soe.ucsc.edu/~optas/papers/jl.pdf """ @_deprecate_positional_args def __init__(self, n_components='auto', *, density='auto', eps=0.1, dense_output=False, random_state=None): super().__init__( n_components=n_components, eps=eps, dense_output=dense_output, random_state=random_state) self.density = density def _make_random_matrix(self, n_components, n_features): """ Generate the random projection matrix Parameters ---------- n_components : int Dimensionality of the target projection space. n_features : int Dimensionality of the original source space. Returns ------- components : {ndarray, sparse matrix} of shape \ (n_components, n_features) The generated random matrix. Sparse matrix will be of CSR format. """ random_state = check_random_state(self.random_state) self.density_ = _check_density(self.density, n_features) return _sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)
bsd-3-clause
ElDeveloper/scikit-learn
sklearn/cluster/mean_shift_.py
7
15079
"""Mean shift clustering algorithm. Mean shift clustering aims to discover *blobs* in a smooth density of samples. It is a centroid based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids. Seeding is performed using a binning technique for scalability. """ # Authors: Conrad Lee <conradlee@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Martino Sorbaro <martino.sorbaro@ed.ac.uk> import numpy as np import warnings from collections import defaultdict from ..externals import six from ..utils.validation import check_is_fitted from ..utils import extmath, check_random_state, gen_batches, check_array from ..base import BaseEstimator, ClusterMixin from ..neighbors import NearestNeighbors from ..metrics.pairwise import pairwise_distances_argmin from ..externals.joblib import Parallel from ..externals.joblib import delayed def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0): """Estimate the bandwidth to use with the mean-shift algorithm. That this function takes time at least quadratic in n_samples. For large datasets, it's wise to set that parameter to a small value. Parameters ---------- X : array-like, shape=[n_samples, n_features] Input points. quantile : float, default 0.3 should be between [0, 1] 0.5 means that the median of all pairwise distances is used. n_samples : int, optional The number of samples to use. If not given, all samples are used. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Returns ------- bandwidth : float The bandwidth parameter. """ random_state = check_random_state(random_state) if n_samples is not None: idx = random_state.permutation(X.shape[0])[:n_samples] X = X[idx] nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile)) nbrs.fit(X) bandwidth = 0. for batch in gen_batches(len(X), 500): d, _ = nbrs.kneighbors(X[batch, :], return_distance=True) bandwidth += np.max(d, axis=1).sum() return bandwidth / X.shape[0] # separate function for each seed's iterative loop def _mean_shift_single_seed(my_mean, X, nbrs, max_iter): # For each seed, climb gradient until convergence or max_iter bandwidth = nbrs.get_params()['radius'] stop_thresh = 1e-3 * bandwidth # when mean has converged completed_iterations = 0 while True: # Find mean of points within bandwidth i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0] points_within = X[i_nbrs] if len(points_within) == 0: break # Depending on seeding strategy this condition may occur my_old_mean = my_mean # save the old mean my_mean = np.mean(points_within, axis=0) # If converged or at max_iter, adds the cluster if (extmath.norm(my_mean - my_old_mean) < stop_thresh or completed_iterations == max_iter): return tuple(my_mean), len(points_within) completed_iterations += 1 def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False, min_bin_freq=1, cluster_all=True, max_iter=300, n_jobs=1): """Perform mean shift clustering of data using a flat kernel. Read more in the :ref:`User Guide <mean_shift>`. Parameters ---------- X : array-like, shape=[n_samples, n_features] Input data. bandwidth : float, optional Kernel bandwidth. If bandwidth is not given, it is determined using a heuristic based on the median of all pairwise distances. This will take quadratic time in the number of samples. 
The sklearn.cluster.estimate_bandwidth function can be used to do this more efficiently. seeds : array-like, shape=[n_seeds, n_features] or None Point used as initial kernel locations. If None and bin_seeding=False, each data point is used as a seed. If None and bin_seeding=True, see bin_seeding. bin_seeding : boolean, default=False If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth. Setting this option to True will speed up the algorithm because fewer seeds will be initialized. Ignored if seeds argument is not None. min_bin_freq : int, default=1 To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds. cluster_all : boolean, default True If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. If false, then orphans are given cluster label -1. max_iter : int, default 300 Maximum number of iterations, per seed point before the clustering operation terminates (for that seed point), if has not converged yet. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Returns ------- cluster_centers : array, shape=[n_clusters, n_features] Coordinates of cluster centers. labels : array, shape=[n_samples] Cluster labels for each point. Notes ----- See examples/cluster/plot_meanshift.py for an example. """ if bandwidth is None: bandwidth = estimate_bandwidth(X) elif bandwidth <= 0: raise ValueError("bandwidth needs to be greater than zero or None,\ got %f" % bandwidth) if seeds is None: if bin_seeding: seeds = get_bin_seeds(X, bandwidth, min_bin_freq) else: seeds = X n_samples, n_features = X.shape center_intensity_dict = {} nbrs = NearestNeighbors(radius=bandwidth).fit(X) # execute iterations on all seeds in parallel all_res = Parallel(n_jobs=n_jobs)( delayed(_mean_shift_single_seed) (seed, X, nbrs, max_iter) for seed in seeds) # copy results in a dictionary for i in range(len(seeds)): if all_res[i] is not None: center_intensity_dict[all_res[i][0]] = all_res[i][1] if not center_intensity_dict: # nothing near seeds raise ValueError("No point was within bandwidth=%f of any seed." " Try a different seeding strategy \ or increase the bandwidth." % bandwidth) # POST PROCESSING: remove near duplicate points # If the distance between two kernels is less than the bandwidth, # then we have to remove one because it is a duplicate. Remove the # one with fewer points. 
sorted_by_intensity = sorted(center_intensity_dict.items(), key=lambda tup: tup[1], reverse=True) sorted_centers = np.array([tup[0] for tup in sorted_by_intensity]) unique = np.ones(len(sorted_centers), dtype=np.bool) nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers) for i, center in enumerate(sorted_centers): if unique[i]: neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[0] unique[neighbor_idxs] = 0 unique[i] = 1 # leave the current point as unique cluster_centers = sorted_centers[unique] # ASSIGN LABELS: a point belongs to the cluster that it is closest to nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers) labels = np.zeros(n_samples, dtype=np.int) distances, idxs = nbrs.kneighbors(X) if cluster_all: labels = idxs.flatten() else: labels.fill(-1) bool_selector = distances.flatten() <= bandwidth labels[bool_selector] = idxs.flatten()[bool_selector] return cluster_centers, labels def get_bin_seeds(X, bin_size, min_bin_freq=1): """Finds seeds for mean_shift. Finds seeds by first binning data onto a grid whose lines are spaced bin_size apart, and then choosing those bins with at least min_bin_freq points. Parameters ---------- X : array-like, shape=[n_samples, n_features] Input points, the same points that will be used in mean_shift. bin_size : float Controls the coarseness of the binning. Smaller values lead to more seeding (which is computationally more expensive). If you're not sure how to set this, set it to the value of the bandwidth used in clustering.mean_shift. min_bin_freq : integer, optional Only bins with at least min_bin_freq will be selected as seeds. Raising this value decreases the number of seeds found, which makes mean_shift computationally cheaper. Returns ------- bin_seeds : array-like, shape=[n_samples, n_features] Points used as initial kernel positions in clustering.mean_shift. """ # Bin points bin_sizes = defaultdict(int) for point in X: binned_point = np.round(point / bin_size) bin_sizes[tuple(binned_point)] += 1 # Select only those bins as seeds which have enough members bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if freq >= min_bin_freq], dtype=np.float32) if len(bin_seeds) == len(X): warnings.warn("Binning data failed with provided bin_size=%f," " using data points as seeds." % bin_size) return X bin_seeds = bin_seeds * bin_size return bin_seeds class MeanShift(BaseEstimator, ClusterMixin): """Mean shift clustering using a flat kernel. Mean shift clustering aims to discover "blobs" in a smooth density of samples. It is a centroid-based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids. Seeding is performed using a binning technique for scalability. Read more in the :ref:`User Guide <mean_shift>`. Parameters ---------- bandwidth : float, optional Bandwidth used in the RBF kernel. If not given, the bandwidth is estimated using sklearn.cluster.estimate_bandwidth; see the documentation for that function for hints on scalability (see also the Notes, below). seeds : array, shape=[n_samples, n_features], optional Seeds used to initialize kernels. If not set, the seeds are calculated by clustering.get_bin_seeds with bandwidth as the grid size and default values for other parameters. 
bin_seeding : boolean, optional If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth. Setting this option to True will speed up the algorithm because fewer seeds will be initialized. default value: False Ignored if seeds argument is not None. min_bin_freq : int, optional To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds. If not defined, set to 1. cluster_all : boolean, default True If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. If false, then orphans are given cluster label -1. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers. labels_ : Labels of each point. Notes ----- Scalability: Because this implementation uses a flat kernel and a Ball Tree to look up members of each kernel, the complexity will is to O(T*n*log(n)) in lower dimensions, with n the number of samples and T the number of points. In higher dimensions the complexity will tend towards O(T*n^2). Scalability can be boosted by using fewer seeds, for example by using a higher value of min_bin_freq in the get_bin_seeds function. Note that the estimate_bandwidth function is much less scalable than the mean shift algorithm and will be the bottleneck if it is used. References ---------- Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward feature space analysis". IEEE Transactions on Pattern Analysis and Machine Intelligence. 2002. pp. 603-619. """ def __init__(self, bandwidth=None, seeds=None, bin_seeding=False, min_bin_freq=1, cluster_all=True, n_jobs=1): self.bandwidth = bandwidth self.seeds = seeds self.bin_seeding = bin_seeding self.cluster_all = cluster_all self.min_bin_freq = min_bin_freq self.n_jobs = n_jobs def fit(self, X, y=None): """Perform clustering. Parameters ----------- X : array-like, shape=[n_samples, n_features] Samples to cluster. """ X = check_array(X) self.cluster_centers_, self.labels_ = \ mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds, min_bin_freq=self.min_bin_freq, bin_seeding=self.bin_seeding, cluster_all=self.cluster_all, n_jobs=self.n_jobs) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix}, shape=[n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, "cluster_centers_") return pairwise_distances_argmin(X, self.cluster_centers_)
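A small usage sketch of the estimator defined above (an illustration, not part of the module): estimate a bandwidth on subsampled data, run MeanShift with bin seeding, and read back the discovered centers. The blob data and every parameter value are arbitrary choices.

# Illustrative sketch only; assumes scikit-learn with make_blobs available.
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs

# Three well-separated blobs; the centers themselves are arbitrary.
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=1000, centers=centers, cluster_std=0.4,
                  random_state=0)

# estimate_bandwidth is at least quadratic in n_samples, so subsample it.
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)

# bin_seeding=True seeds from binned grid locations instead of every point.
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)

print("estimated clusters:", len(np.unique(ms.labels_)))
print("cluster centers:\n", ms.cluster_centers_)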
bsd-3-clause
kagayakidan/scikit-learn
examples/exercises/plot_iris_exercise.py
320
1602
""" ================================ SVM Exercise ================================ A tutorial exercise for using different SVM kernels. This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 0, :2] y = y[y != 0] n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) X_train = X[:.9 * n_sample] y_train = y[:.9 * n_sample] X_test = X[.9 * n_sample:] y_test = y[.9 * n_sample:] # fit the model for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')): clf = svm.SVC(kernel=kernel, gamma=10) clf.fit(X_train, y_train) plt.figure(fig_num) plt.clf() plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired) # Circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show()
bsd-3-clause
shahankhatch/scikit-learn
sklearn/utils/fixes.py
132
12882
"""Compatibility fixes for older version of python, numpy and scipy If you add content to this file, please give the version of the package at which the fixe is no longer needed. """ # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <fpedregosa@acm.org> # Lars Buitinck # # License: BSD 3 clause import inspect import warnings import sys import functools import os import errno import numpy as np import scipy.sparse as sp import scipy def _parse_version(version_string): version = [] for x in version_string.split('.'): try: version.append(int(x)) except ValueError: # x may be of the form dev-1ea1592 version.append(x) return tuple(version) np_version = _parse_version(np.__version__) sp_version = _parse_version(scipy.__version__) try: from scipy.special import expit # SciPy >= 0.10 with np.errstate(invalid='ignore', over='ignore'): if np.isnan(expit(1000)): # SciPy < 0.14 raise ImportError("no stable expit in scipy.special") except ImportError: def expit(x, out=None): """Logistic sigmoid function, ``1 / (1 + exp(-x))``. See sklearn.utils.extmath.log_logistic for the log of this function. """ if out is None: out = np.empty(np.atleast_1d(x).shape, dtype=np.float64) out[:] = x # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2 # This way of computing the logistic is both fast and stable. out *= .5 np.tanh(out, out) out += 1 out *= .5 return out.reshape(np.shape(x)) # little danse to see if np.copy has an 'order' keyword argument if 'order' in inspect.getargspec(np.copy)[0]: def safe_copy(X): # Copy, but keep the order return np.copy(X, order='K') else: # Before an 'order' argument was introduced, numpy wouldn't muck with # the ordering safe_copy = np.copy try: if (not np.allclose(np.divide(.4, 1, casting="unsafe"), np.divide(.4, 1, casting="unsafe", dtype=np.float)) or not np.allclose(np.divide(.4, 1), .4)): raise TypeError('Divide not working with dtype: ' 'https://github.com/numpy/numpy/issues/3484') divide = np.divide except TypeError: # Compat for old versions of np.divide that do not provide support for # the dtype args def divide(x1, x2, out=None, dtype=None): out_orig = out if out is None: out = np.asarray(x1, dtype=dtype) if out is x1: out = x1.copy() else: if out is not x1: out[:] = x1 if dtype is not None and out.dtype != dtype: out = out.astype(dtype) out /= x2 if out_orig is None and np.isscalar(x1): out = np.asscalar(out) return out try: np.array(5).astype(float, copy=False) except TypeError: # Compat where astype accepted no copy argument def astype(array, dtype, copy=True): if not copy and array.dtype == dtype: return array return array.astype(dtype) else: astype = np.ndarray.astype try: with warnings.catch_warnings(record=True): # Don't raise the numpy deprecation warnings that appear in # 1.9, but avoid Python bug due to simplefilter('ignore') warnings.simplefilter('always') sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0) except (TypeError, AttributeError): # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument # the following code is taken from the scipy 0.14 codebase def _minor_reduce(X, ufunc): major_index = np.flatnonzero(np.diff(X.indptr)) if X.data.size == 0 and major_index.size == 0: # Numpy < 1.8.0 don't handle empty arrays in reduceat value = np.zeros_like(X.data) else: value = ufunc.reduceat(X.data, X.indptr[major_index]) return major_index, value def _min_or_max_axis(X, axis, min_or_max): N = X.shape[axis] if N == 0: raise ValueError("zero-size array to reduction operation") M 
= X.shape[1 - axis] mat = X.tocsc() if axis == 0 else X.tocsr() mat.sum_duplicates() major_index, value = _minor_reduce(mat, min_or_max) not_full = np.diff(mat.indptr)[major_index] < N value[not_full] = min_or_max(value[not_full], 0) mask = value != 0 major_index = np.compress(mask, major_index) value = np.compress(mask, value) from scipy.sparse import coo_matrix if axis == 0: res = coo_matrix((value, (np.zeros(len(value)), major_index)), dtype=X.dtype, shape=(1, M)) else: res = coo_matrix((value, (major_index, np.zeros(len(value)))), dtype=X.dtype, shape=(M, 1)) return res.A.ravel() def _sparse_min_or_max(X, axis, min_or_max): if axis is None: if 0 in X.shape: raise ValueError("zero-size array to reduction operation") zero = X.dtype.type(0) if X.nnz == 0: return zero m = min_or_max.reduce(X.data.ravel()) if X.nnz != np.product(X.shape): m = min_or_max(zero, m) return m if axis < 0: axis += 2 if (axis == 0) or (axis == 1): return _min_or_max_axis(X, axis, min_or_max) else: raise ValueError("invalid axis, use 0 for rows, or 1 for columns") def sparse_min_max(X, axis): return (_sparse_min_or_max(X, axis, np.minimum), _sparse_min_or_max(X, axis, np.maximum)) else: def sparse_min_max(X, axis): return (X.min(axis=axis).toarray().ravel(), X.max(axis=axis).toarray().ravel()) try: from numpy import argpartition except ImportError: # numpy.argpartition was introduced in v 1.8.0 def argpartition(a, kth, axis=-1, kind='introselect', order=None): return np.argsort(a, axis=axis, order=order) try: from itertools import combinations_with_replacement except ImportError: # Backport of itertools.combinations_with_replacement for Python 2.6, # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright # Python Software Foundation (https://docs.python.org/3/license.html) def combinations_with_replacement(iterable, r): # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC pool = tuple(iterable) n = len(pool) if not n and r: return indices = [0] * r yield tuple(pool[i] for i in indices) while True: for i in reversed(range(r)): if indices[i] != n - 1: break else: return indices[i:] = [indices[i] + 1] * (r - i) yield tuple(pool[i] for i in indices) try: from numpy import isclose except ImportError: def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ Returns a boolean array where two arrays are element-wise equal within a tolerance. This function was added to numpy v1.7.0, and the version you are running has been backported from numpy v1.8.1. See its documentation for more details. """ def within_tol(x, y, atol, rtol): with np.errstate(invalid='ignore'): result = np.less_equal(abs(x - y), atol + rtol * abs(y)) if np.isscalar(a) and np.isscalar(b): result = bool(result) return result x = np.array(a, copy=False, subok=True, ndmin=1) y = np.array(b, copy=False, subok=True, ndmin=1) xfin = np.isfinite(x) yfin = np.isfinite(y) if all(xfin) and all(yfin): return within_tol(x, y, atol, rtol) else: finite = xfin & yfin cond = np.zeros_like(finite, subok=True) # Since we're using boolean indexing, x & y must be the same shape. # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in # lib.stride_tricks, though, so we can't import it here. x = x * np.ones_like(cond) y = y * np.ones_like(cond) # Avoid subtraction with infinite/nan values... cond[finite] = within_tol(x[finite], y[finite], atol, rtol) # Check for equality of infinite values... 
cond[~finite] = (x[~finite] == y[~finite]) if equal_nan: # Make NaN == NaN cond[np.isnan(x) & np.isnan(y)] = True return cond if np_version < (1, 7): # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg. def frombuffer_empty(buf, dtype): if len(buf) == 0: return np.empty(0, dtype=dtype) else: return np.frombuffer(buf, dtype=dtype) else: frombuffer_empty = np.frombuffer if np_version < (1, 8): def in1d(ar1, ar2, assume_unique=False, invert=False): # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2 # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # This code is significantly faster when the condition is satisfied. if len(ar2) < 10 * len(ar1) ** 0.145: if invert: mask = np.ones(len(ar1), dtype=np.bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=np.bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) indx = order.argsort(kind='mergesort')[:len(ar1)] if assume_unique: return flag[indx] else: return flag[indx][rev_idx] else: from numpy import in1d if sp_version < (0, 15): # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142 from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr else: from scipy.sparse.linalg import lsqr as sparse_lsqr if sys.version_info < (2, 7, 0): # partial cannot be pickled in Python 2.6 # http://bugs.python.org/issue1398 class partial(object): def __init__(self, func, *args, **keywords): functools.update_wrapper(self, func) self.func = func self.args = args self.keywords = keywords def __call__(self, *args, **keywords): args = self.args + args kwargs = self.keywords.copy() kwargs.update(keywords) return self.func(*args, **kwargs) else: from functools import partial if np_version < (1, 6, 2): # Allow bincount to accept empty arrays # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040 def bincount(x, weights=None, minlength=None): if len(x) > 0: return np.bincount(x, weights, minlength) else: if minlength is None: minlength = 0 minlength = np.asscalar(np.asarray(minlength, dtype=np.intp)) return np.zeros(minlength, dtype=np.intp) else: from numpy import bincount if 'exist_ok' in inspect.getargspec(os.makedirs).args: makedirs = os.makedirs else: def makedirs(name, mode=0o777, exist_ok=False): """makedirs(name [, mode=0o777][, exist_ok=False]) Super-mkdir; create a leaf directory and all intermediate ones. Works like mkdir, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. If the target directory already exists, raise an OSError if exist_ok is False. Otherwise no exception is raised. This is recursive. """ try: os.makedirs(name, mode=mode) except OSError as e: if (not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(name)): raise
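Most of the fallbacks above simply re-implement a missing function; the expit backport near the top also encodes a numerical trick, rewriting the logistic as (1 + tanh(x / 2)) / 2 to avoid overflow. The sketch below is not part of the module and uses arbitrary test values; it checks that identity against scipy's own expit.

# Sketch only; assumes numpy and a scipy recent enough to ship a stable expit.
import numpy as np
from scipy.special import expit


def tanh_expit(x):
    """Logistic sigmoid computed as (1 + tanh(x / 2)) / 2, as in the fallback."""
    x = np.asarray(x, dtype=np.float64)
    return 0.5 * (1.0 + np.tanh(0.5 * x))


x = np.array([-1000.0, -10.0, 0.0, 10.0, 1000.0])   # includes extreme inputs
print(tanh_expit(x))                                 # no overflow warnings
print(np.allclose(tanh_expit(x), expit(x)))          # True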
bsd-3-clause
kevin-intel/scikit-learn
setup.py
1
11665
#! /usr/bin/env python # # Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com> # 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr> # License: 3-clause BSD import sys import os import platform import shutil # We need to import setuptools before because it monkey-patches distutils import setuptools # noqa from distutils.command.clean import clean as Clean from distutils.command.sdist import sdist import traceback import importlib try: import builtins except ImportError: # Python 2 compat: just to be able to declare that Python >=3.7 is needed. import __builtin__ as builtins # This is a bit (!) hackish: we are setting a global variable so that the # main sklearn __init__ can detect if it is being loaded by the setup # routine, to avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by scikit-learn to # recursively build the compiled extensions in sub-packages is based on the # Python import machinery. builtins.__SKLEARN_SETUP__ = True DISTNAME = 'scikit-learn' DESCRIPTION = 'A set of python modules for machine learning and data mining' with open('README.rst') as f: LONG_DESCRIPTION = f.read() MAINTAINER = 'Andreas Mueller' MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de' URL = 'http://scikit-learn.org' DOWNLOAD_URL = 'https://pypi.org/project/scikit-learn/#files' LICENSE = 'new BSD' PROJECT_URLS = { 'Bug Tracker': 'https://github.com/scikit-learn/scikit-learn/issues', 'Documentation': 'https://scikit-learn.org/stable/documentation.html', 'Source Code': 'https://github.com/scikit-learn/scikit-learn' } # We can actually import a restricted version of sklearn that # does not need the compiled code import sklearn # noqa import sklearn._min_dependencies as min_deps # noqa from sklearn.externals._packaging.version import parse as parse_version # noqa VERSION = sklearn.__version__ # For some commands, use setuptools SETUPTOOLS_COMMANDS = { 'develop', 'release', 'bdist_egg', 'bdist_rpm', 'bdist_wininst', 'install_egg_info', 'build_sphinx', 'egg_info', 'easy_install', 'upload', 'bdist_wheel', '--single-version-externally-managed', } if SETUPTOOLS_COMMANDS.intersection(sys.argv): extra_setuptools_args = dict( zip_safe=False, # the package can run out of an .egg file include_package_data=True, extras_require={ key: min_deps.tag_to_packages[key] for key in ['examples', 'docs', 'tests', 'benchmark'] }, ) else: extra_setuptools_args = dict() # Custom clean command to remove build artifacts class CleanCommand(Clean): description = "Remove build artifacts from the source tree" def run(self): Clean.run(self) # Remove c files if we are not within a sdist package cwd = os.path.abspath(os.path.dirname(__file__)) remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO')) if remove_c_files: print('Will remove generated .c files') if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sklearn'): for filename in filenames: if any(filename.endswith(suffix) for suffix in (".so", ".pyd", ".dll", ".pyc")): os.unlink(os.path.join(dirpath, filename)) continue extension = os.path.splitext(filename)[1] if remove_c_files and extension in ['.c', '.cpp']: pyx_file = str.replace(filename, extension, '.pyx') if os.path.exists(os.path.join(dirpath, pyx_file)): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname == '__pycache__': shutil.rmtree(os.path.join(dirpath, dirname)) cmdclass = {'clean': CleanCommand, 'sdist': sdist} # Custom build_ext command to set OpenMP compile flags depending on os 
and # compiler. Also makes it possible to set the parallelism level via # and environment variable (useful for the wheel building CI). # build_ext has to be imported after setuptools try: from numpy.distutils.command.build_ext import build_ext # noqa class build_ext_subclass(build_ext): def finalize_options(self): super().finalize_options() if self.parallel is None: # Do not override self.parallel if already defined by # command-line flag (--parallel or -j) parallel = os.environ.get("SKLEARN_BUILD_PARALLEL") if parallel: self.parallel = int(parallel) if self.parallel: print("setting parallel=%d " % self.parallel) def build_extensions(self): from sklearn._build_utils.openmp_helpers import get_openmp_flag if sklearn._OPENMP_SUPPORTED: openmp_flag = get_openmp_flag(self.compiler) for e in self.extensions: e.extra_compile_args += openmp_flag e.extra_link_args += openmp_flag build_ext.build_extensions(self) cmdclass['build_ext'] = build_ext_subclass except ImportError: # Numpy should not be a dependency just to be able to introspect # that python 3.7 is required. pass # Optional wheelhouse-uploader features # To automate release of binary packages for scikit-learn we need a tool # to download the packages generated by travis and appveyor workers (with # version number matching the current release) and upload them all at once # to PyPI at release time. # The URL of the artifact repositories are configured in the setup.cfg file. WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'} if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv): import wheelhouse_uploader.cmd cmdclass.update(vars(wheelhouse_uploader.cmd)) def configuration(parent_package='', top_path=None): if os.path.exists('MANIFEST'): os.remove('MANIFEST') from numpy.distutils.misc_util import Configuration from sklearn._build_utils import _check_cython_version config = Configuration(None, parent_package, top_path) # Avoid non-useful msg: # "Ignoring attempt to set 'name' (from ... " config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) # Cython is required by config.add_subpackage for templated extensions # that need the tempita sub-submodule. So check that we have the correct # version of Cython so as to be able to raise a more informative error # message from the start if it's not the case. _check_cython_version() config.add_subpackage('sklearn') return config def check_package_status(package, min_version): """ Returns a dictionary containing a boolean specifying whether given package is up-to-date, along with the version string (empty string if not installed). 
""" package_status = {} try: module = importlib.import_module(package) package_version = module.__version__ package_status['up_to_date'] = parse_version( package_version) >= parse_version(min_version) package_status['version'] = package_version except ImportError: traceback.print_exc() package_status['up_to_date'] = False package_status['version'] = "" req_str = "scikit-learn requires {} >= {}.\n".format( package, min_version) instructions = ("Installation instructions are available on the " "scikit-learn website: " "http://scikit-learn.org/stable/install.html\n") if package_status['up_to_date'] is False: if package_status['version']: raise ImportError("Your installation of {} " "{} is out-of-date.\n{}{}" .format(package, package_status['version'], req_str, instructions)) else: raise ImportError("{} is not " "installed.\n{}{}" .format(package, req_str, instructions)) def setup_package(): metadata = dict(name=DISTNAME, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, license=LICENSE, url=URL, download_url=DOWNLOAD_URL, project_urls=PROJECT_URLS, version=VERSION, long_description=LONG_DESCRIPTION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: C', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Development Status :: 5 - Production/Stable', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ('Programming Language :: Python :: ' 'Implementation :: CPython'), ('Programming Language :: Python :: ' 'Implementation :: PyPy') ], cmdclass=cmdclass, python_requires=">=3.7", install_requires=min_deps.tag_to_packages['install'], package_data={'': ['*.pxd']}, **extra_setuptools_args) commands = [arg for arg in sys.argv[1:] if not arg.startswith('-')] if all(command in ('egg_info', 'dist_info', 'clean', 'check') for command in commands): # These actions are required to succeed without Numpy for example when # pip is used to install Scikit-learn when Numpy is not yet present in # the system. # These commands use setup from setuptools from setuptools import setup metadata['version'] = VERSION else: if sys.version_info < (3, 6): raise RuntimeError( "Scikit-learn requires Python 3.7 or later. The current" " Python version is %s installed in %s." % (platform.python_version(), sys.executable)) check_package_status('numpy', min_deps.NUMPY_MIN_VERSION) check_package_status('scipy', min_deps.SCIPY_MIN_VERSION) # These commands require the setup from numpy.distutils because they # may use numpy.distutils compiler classes. from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == "__main__": setup_package()
bsd-3-clause
mkraemer67/pylearn2
pylearn2/tests/test_theano.py
45
4805
""" Include tests related to Theano. 1) One test on one thing Pylearn2 depend to be done by Theano. 2) One test for a rare corner case crash in Theano that we where not able to reproduce rapidly enough without having this tests depend on Pylearn2. """ __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import numpy as np import theano from theano import tensor as T import pylearn2 from pylearn2.config import yaml_parse from pylearn2.testing.skip import skip_if_no_gpu def test_grad(): """Tests that the theano grad method returns a list if it is passed a list and a single variable if it is passed a single variable. pylearn2 depends on theano behaving this way but theano developers have repeatedly changed it """ X = T.matrix() y = X.sum() G = T.grad(y, [X]) assert isinstance(G, list) G = T.grad(y, X) assert not isinstance(G, list) def test_biglayer(): """Test a crash during Theano compilation. It would be too long to redo this test without depending on Pylearn2. So we put it here. """ skip_if_no_gpu() yaml_string = """ !obj:pylearn2.train.Train { dataset: &train !obj:pylearn2.testing.datasets.random_one_hot_topological_dense_design_matrix { rng: !obj:numpy.random.RandomState { seed: [2014, 6, 6] }, shape: &input_shape [%(xsize)i, %(ysize)i], channels: 4, axes: ['c', 0, 1, 'b'], num_examples: 128, num_classes: 10 }, model: !obj:pylearn2.models.mlp.MLP { batch_size: 128, layers: [ !obj:pylearn2.models.mlp.FlattenerLayer { raw_layer: !obj:pylearn2.models.mlp.CompositeLayer { layer_name: 'h0', layers: [ !obj:pylearn2.models.mlp.MLP { layer_name: 'h1', layers: [ !obj:pylearn2.models.maxout.MaxoutConvC01B { layer_name: 'conv00', tied_b: 1, W_lr_scale: .05, b_lr_scale: .05, num_channels: 16, num_pieces: 1, kernel_shape: [1, 1], pool_shape: [4, 4], pool_stride: [4, 4], irange: .005, max_kernel_norm: 0.9, } ]}, !obj:pylearn2.models.maxout.Maxout { layer_name: 'max0', W_lr_scale: .1, b_lr_scale: .1, num_units: 16, irange: .005, max_col_norm: 1.9365, num_pieces: 1, } ] } }, !obj:pylearn2.models.mlp.Softmax { max_col_norm: 1.9365, layer_name: 'y', n_classes: 10, irange: .005 } ], input_space: !obj:pylearn2.space.Conv2DSpace { shape: *input_shape, num_channels: 4, axes: ['c', 0, 1, 'b'], }, }, algorithm: !obj:pylearn2.training_algorithms.sgd.SGD { learning_rate: .05, learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Momentum { init_momentum: 0.5, }, monitoring_dataset: { 'train': *train }, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 3 }, }, extensions: [ !obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor { start: 1, saturate: 250, final_momentum: .7 } ] } """ try: orig_floatX = theano.config.floatX theano.config.floatX = 'float32' theano.sandbox.cuda.use('gpu') x_size, y_size = 4, 4 parameters = {'xsize': x_size, 'ysize': y_size} test = yaml_parse.load(yaml_string % parameters) test.main_loop() finally: theano.config.floatX = orig_floatX theano.sandbox.cuda.unuse()
bsd-3-clause
llooker/public-datasets-pipelines
datasets/census_bureau_acs/pipelines/schooldistrictsecondary_2019_5yr/schooldistrictsecondary_2019_5yr_dag.py
1
32894
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from airflow import DAG from airflow.providers.cncf.kubernetes.operators import kubernetes_pod from airflow.providers.google.cloud.transfers import gcs_to_bigquery default_args = { "owner": "Google", "depends_on_past": False, "start_date": "2021-03-01", } with DAG( dag_id="census_bureau_acs.schooldistrictsecondary_2019_5yr", default_args=default_args, max_active_runs=1, schedule_interval="@once", catchup=False, default_view="graph", ) as dag: # Run CSV transform within kubernetes pod transform_csv = kubernetes_pod.KubernetesPodOperator( task_id="transform_csv", startup_timeout_seconds=600, name="schooldistrictsecondary_2019_5yr", namespace="composer", service_account_name="datasets", image_pull_policy="Always", image="{{ var.json.census_bureau_acs.container_registry.run_csv_transform_kub }}", env_vars={ "SOURCE_URL": "https://api.census.gov/data/2019/acs/acs~year_report~?get=NAME,~group_id~_~row_position~E&for=~api_naming_convention~:*&in=state:~state_code~&key=550e53635053be51754b09b5e9f5009c94aa0586", "YEAR_REPORT": "5", "API_NAMING_CONVENTION": "school%20district%20(secondary)", "TARGET_FILE": "files/data_output.csv", "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}", "TARGET_GCS_PATH": "data/census_bureau_acs/schooldistrictsecondary_2019_5yr/data_output.csv", "PIPELINE_NAME": "schooldistrictsecondary_2019_5yr", "GEOGRAPHY": "schooldistrictsecondary", "REPORT_LEVEL": "state_level", "CONCAT_COL": '["state","school_district"]', "RENAME_MAPPINGS": '{"0":"name", "1":"KPI_Value", "2":"state", "3":"school_district"}', "CSV_HEADERS": 
'["geo_id","aggregate_travel_time_to_work","amerindian_including_hispanic","amerindian_pop","armed_forces","asian_including_hispanic","asian_male_45_54","asian_male_55_64","asian_pop","associates_degree","bachelors_degree","bachelors_degree_2","bachelors_degree_or_higher_25_64","black_including_hispanic","black_male_45_54","black_male_55_64","black_pop","children","children_in_single_female_hh","civilian_labor_force","commute_10_14_mins","commute_15_19_mins","commute_20_24_mins","commute_25_29_mins","commute_30_34_mins","commute_35_39_mins","commute_35_44_mins","commute_40_44_mins","commute_45_59_mins","commute_5_9_mins","commute_60_89_mins","commute_60_more_mins","commute_90_more_mins","commute_less_10_mins","commuters_16_over","commuters_by_bus","commuters_by_car_truck_van","commuters_by_carpool","commuters_by_public_transportation","commuters_by_subway_or_elevated","commuters_drove_alone","different_house_year_ago_different_city","different_house_year_ago_same_city","dwellings_10_to_19_units","dwellings_1_units_attached","dwellings_1_units_detached","dwellings_20_to_49_units","dwellings_2_units","dwellings_3_to_4_units","dwellings_50_or_more_units","dwellings_5_to_9_units","employed_agriculture_forestry_fishing_hunting_mining","employed_arts_entertainment_recreation_accommodation_food","employed_construction","employed_education_health_social","employed_finance_insurance_real_estate","employed_information","employed_manufacturing","employed_other_services_not_public_admin","employed_pop","employed_public_administration","employed_retail_trade","employed_science_management_admin_waste","employed_transportation_warehousing_utilities","employed_wholesale_trade","families_with_young_children","family_households","father_in_labor_force_one_parent_families_with_young_children","father_one_parent_families_with_young_children","female_10_to_14","female_15_to_17","female_18_to_19","female_20","female_21","female_22_to_24","female_25_to_29","female_30_to_34","female_35_to_39","female_40_to_44","female_45_to_49","female_50_to_54","female_55_to_59","female_5_to_9","female_60_to_61","female_62_to_64","female_65_to_66","female_67_to_69","female_70_to_74","female_75_to_79","female_80_to_84","female_85_and_over","female_female_households","female_pop","female_under_5","four_more_cars","gini_index","graduate_professional_degree","group_quarters","high_school_diploma","high_school_including_ged","hispanic_any_race","hispanic_male_45_54","hispanic_male_55_64","hispanic_pop","households","households_public_asst_or_food_stamps","households_retirement_income","housing_built_1939_or_earlier","housing_built_2000_to_2004","housing_built_2005_or_later","housing_units","housing_units_renter_occupied","in_grades_1_to_4","in_grades_5_to_8","in_grades_9_to_12","in_school","in_undergrad_college","income_100000_124999","income_10000_14999","income_125000_149999","income_150000_199999","income_15000_19999","income_200000_or_more","income_20000_24999","income_25000_29999","income_30000_34999","income_35000_39999","income_40000_44999","income_45000_49999","income_50000_59999","income_60000_74999","income_75000_99999","income_less_10000","income_per_capita","less_one_year_college","less_than_high_school_graduate","male_10_to_14","male_15_to_17","male_18_to_19","male_20","male_21","male_22_to_24","male_25_to_29","male_30_to_34","male_35_to_39","male_40_to_44","male_45_64_associates_degree","male_45_64_bachelors_degree","male_45_64_grade_9_12","male_45_64_graduate_degree","male_45_64_high_school","male_45_64_less_than_9_grad
e","male_45_64_some_college","male_45_to_49","male_45_to_64","male_50_to_54","male_55_to_59","male_5_to_9","male_60_to_61","male_62_to_64","male_65_to_66","male_67_to_69","male_70_to_74","male_75_to_79","male_80_to_84","male_85_and_over","male_male_households","male_pop","male_under_5","management_business_sci_arts_employed","married_households","masters_degree","median_age","median_income","median_rent","median_year_structure_built","million_dollar_housing_units","mobile_homes","mortgaged_housing_units","no_car","no_cars","nonfamily_households","not_hispanic_pop","not_in_labor_force","not_us_citizen_pop","occupation_management_arts","occupation_natural_resources_construction_maintenance","occupation_production_transportation_material","occupation_sales_office","occupation_services","occupied_housing_units","one_car","one_parent_families_with_young_children","one_year_more_college","other_race_pop","owner_occupied_housing_units","owner_occupied_housing_units_lower_value_quartile","owner_occupied_housing_units_median_value","owner_occupied_housing_units_upper_value_quartile","percent_income_spent_on_rent","pop_16_over","pop_25_64","pop_25_years_over","pop_5_years_over","pop_determined_poverty_status","pop_in_labor_force","population_1_year_and_over","population_3_years_over","poverty","rent_10_to_15_percent","rent_15_to_20_percent","rent_20_to_25_percent","rent_25_to_30_percent","rent_30_to_35_percent","rent_35_to_40_percent","rent_40_to_50_percent","rent_burden_not_computed","rent_over_50_percent","rent_under_10_percent","renter_occupied_housing_units_paying_cash_median_gross_rent","sales_office_employed","some_college_and_associates_degree","speak_only_english_at_home","speak_spanish_at_home","speak_spanish_at_home_low_english","three_cars","total_pop","two_cars","two_or_more_races_pop","two_parent_families_with_young_children","two_parents_father_in_labor_force_families_with_young_children","two_parents_in_labor_force_families_with_young_children","two_parents_mother_in_labor_force_families_with_young_children","two_parents_not_in_labor_force_families_with_young_children","unemployed_pop","vacant_housing_units","vacant_housing_units_for_rent","vacant_housing_units_for_sale","walked_to_work","white_including_hispanic","white_male_45_54","white_male_55_64","white_pop","worked_at_home","workers_16_and_over"]', }, resources={ "request_memory": "2G", "request_cpu": "1", "request_ephemeral_storage": "10G", }, ) # Task to load CSV data to a BigQuery table load_to_bq = gcs_to_bigquery.GCSToBigQueryOperator( task_id="load_to_bq", bucket="{{ var.value.composer_bucket }}", source_objects=[ "data/census_bureau_acs/schooldistrictsecondary_2019_5yr/data_output.csv" ], source_format="CSV", destination_project_dataset_table="census_bureau_acs.schooldistrictsecondary_2019_5yr", skip_leading_rows=1, allow_quoted_newlines=True, write_disposition="WRITE_TRUNCATE", schema_fields=[ {"name": "geo_id", "type": "string", "mode": "nullable"}, { "name": "aggregate_travel_time_to_work", "type": "float", "mode": "nullable", }, { "name": "amerindian_including_hispanic", "type": "float", "mode": "nullable", }, {"name": "amerindian_pop", "type": "float", "mode": "nullable"}, {"name": "armed_forces", "type": "float", "mode": "nullable"}, {"name": "asian_including_hispanic", "type": "float", "mode": "nullable"}, {"name": "asian_male_45_54", "type": "float", "mode": "nullable"}, {"name": "asian_male_55_64", "type": "float", "mode": "nullable"}, {"name": "asian_pop", "type": "float", "mode": "nullable"}, {"name": 
"associates_degree", "type": "float", "mode": "nullable"}, {"name": "bachelors_degree", "type": "float", "mode": "nullable"}, {"name": "bachelors_degree_2", "type": "float", "mode": "nullable"}, { "name": "bachelors_degree_or_higher_25_64", "type": "float", "mode": "nullable", }, {"name": "black_including_hispanic", "type": "float", "mode": "nullable"}, {"name": "black_male_45_54", "type": "float", "mode": "nullable"}, {"name": "black_male_55_64", "type": "float", "mode": "nullable"}, {"name": "black_pop", "type": "float", "mode": "nullable"}, {"name": "children", "type": "float", "mode": "nullable"}, { "name": "children_in_single_female_hh", "type": "float", "mode": "nullable", }, {"name": "civilian_labor_force", "type": "float", "mode": "nullable"}, {"name": "commute_10_14_mins", "type": "float", "mode": "nullable"}, {"name": "commute_15_19_mins", "type": "float", "mode": "nullable"}, {"name": "commute_20_24_mins", "type": "float", "mode": "nullable"}, {"name": "commute_25_29_mins", "type": "float", "mode": "nullable"}, {"name": "commute_30_34_mins", "type": "float", "mode": "nullable"}, {"name": "commute_35_39_mins", "type": "float", "mode": "nullable"}, {"name": "commute_35_44_mins", "type": "float", "mode": "nullable"}, {"name": "commute_40_44_mins", "type": "float", "mode": "nullable"}, {"name": "commute_45_59_mins", "type": "float", "mode": "nullable"}, {"name": "commute_5_9_mins", "type": "float", "mode": "nullable"}, {"name": "commute_60_89_mins", "type": "float", "mode": "nullable"}, {"name": "commute_60_more_mins", "type": "float", "mode": "nullable"}, {"name": "commute_90_more_mins", "type": "float", "mode": "nullable"}, {"name": "commute_less_10_mins", "type": "float", "mode": "nullable"}, {"name": "commuters_16_over", "type": "float", "mode": "nullable"}, {"name": "commuters_by_bus", "type": "float", "mode": "nullable"}, {"name": "commuters_by_car_truck_van", "type": "float", "mode": "nullable"}, {"name": "commuters_by_carpool", "type": "float", "mode": "nullable"}, { "name": "commuters_by_public_transportation", "type": "float", "mode": "nullable", }, { "name": "commuters_by_subway_or_elevated", "type": "float", "mode": "nullable", }, {"name": "commuters_drove_alone", "type": "float", "mode": "nullable"}, { "name": "different_house_year_ago_different_city", "type": "float", "mode": "nullable", }, { "name": "different_house_year_ago_same_city", "type": "float", "mode": "nullable", }, {"name": "dwellings_10_to_19_units", "type": "float", "mode": "nullable"}, {"name": "dwellings_1_units_attached", "type": "float", "mode": "nullable"}, {"name": "dwellings_1_units_detached", "type": "float", "mode": "nullable"}, {"name": "dwellings_20_to_49_units", "type": "float", "mode": "nullable"}, {"name": "dwellings_2_units", "type": "float", "mode": "nullable"}, {"name": "dwellings_3_to_4_units", "type": "float", "mode": "nullable"}, {"name": "dwellings_50_or_more_units", "type": "float", "mode": "nullable"}, {"name": "dwellings_5_to_9_units", "type": "float", "mode": "nullable"}, { "name": "employed_agriculture_forestry_fishing_hunting_mining", "type": "float", "mode": "nullable", }, { "name": "employed_arts_entertainment_recreation_accommodation_food", "type": "float", "mode": "nullable", }, {"name": "employed_construction", "type": "float", "mode": "nullable"}, { "name": "employed_education_health_social", "type": "float", "mode": "nullable", }, { "name": "employed_finance_insurance_real_estate", "type": "float", "mode": "nullable", }, {"name": "employed_information", "type": "float", 
"mode": "nullable"}, {"name": "employed_manufacturing", "type": "float", "mode": "nullable"}, { "name": "employed_other_services_not_public_admin", "type": "float", "mode": "nullable", }, {"name": "employed_pop", "type": "float", "mode": "nullable"}, { "name": "employed_public_administration", "type": "float", "mode": "nullable", }, {"name": "employed_retail_trade", "type": "float", "mode": "nullable"}, { "name": "employed_science_management_admin_waste", "type": "float", "mode": "nullable", }, { "name": "employed_transportation_warehousing_utilities", "type": "float", "mode": "nullable", }, {"name": "employed_wholesale_trade", "type": "float", "mode": "nullable"}, { "name": "families_with_young_children", "type": "float", "mode": "nullable", }, {"name": "family_households", "type": "float", "mode": "nullable"}, { "name": "father_in_labor_force_one_parent_families_with_young_children", "type": "float", "mode": "nullable", }, { "name": "father_one_parent_families_with_young_children", "type": "float", "mode": "nullable", }, {"name": "female_10_to_14", "type": "float", "mode": "nullable"}, {"name": "female_15_to_17", "type": "float", "mode": "nullable"}, {"name": "female_18_to_19", "type": "float", "mode": "nullable"}, {"name": "female_20", "type": "float", "mode": "nullable"}, {"name": "female_21", "type": "float", "mode": "nullable"}, {"name": "female_22_to_24", "type": "float", "mode": "nullable"}, {"name": "female_25_to_29", "type": "float", "mode": "nullable"}, {"name": "female_30_to_34", "type": "float", "mode": "nullable"}, {"name": "female_35_to_39", "type": "float", "mode": "nullable"}, {"name": "female_40_to_44", "type": "float", "mode": "nullable"}, {"name": "female_45_to_49", "type": "float", "mode": "nullable"}, {"name": "female_50_to_54", "type": "float", "mode": "nullable"}, {"name": "female_55_to_59", "type": "float", "mode": "nullable"}, {"name": "female_5_to_9", "type": "float", "mode": "nullable"}, {"name": "female_60_to_61", "type": "float", "mode": "nullable"}, {"name": "female_62_to_64", "type": "float", "mode": "nullable"}, {"name": "female_65_to_66", "type": "float", "mode": "nullable"}, {"name": "female_67_to_69", "type": "float", "mode": "nullable"}, {"name": "female_70_to_74", "type": "float", "mode": "nullable"}, {"name": "female_75_to_79", "type": "float", "mode": "nullable"}, {"name": "female_80_to_84", "type": "float", "mode": "nullable"}, {"name": "female_85_and_over", "type": "float", "mode": "nullable"}, {"name": "female_female_households", "type": "float", "mode": "nullable"}, {"name": "female_pop", "type": "float", "mode": "nullable"}, {"name": "female_under_5", "type": "float", "mode": "nullable"}, {"name": "four_more_cars", "type": "float", "mode": "nullable"}, {"name": "gini_index", "type": "float", "mode": "nullable"}, { "name": "graduate_professional_degree", "type": "float", "mode": "nullable", }, {"name": "group_quarters", "type": "float", "mode": "nullable"}, {"name": "high_school_diploma", "type": "float", "mode": "nullable"}, {"name": "high_school_including_ged", "type": "float", "mode": "nullable"}, {"name": "hispanic_any_race", "type": "float", "mode": "nullable"}, {"name": "hispanic_male_45_54", "type": "float", "mode": "nullable"}, {"name": "hispanic_male_55_64", "type": "float", "mode": "nullable"}, {"name": "hispanic_pop", "type": "float", "mode": "nullable"}, {"name": "households", "type": "float", "mode": "nullable"}, { "name": "households_public_asst_or_food_stamps", "type": "float", "mode": "nullable", }, { "name": 
"households_retirement_income", "type": "float", "mode": "nullable", }, { "name": "housing_built_1939_or_earlier", "type": "float", "mode": "nullable", }, {"name": "housing_built_2000_to_2004", "type": "float", "mode": "nullable"}, { "name": "housing_built_2005_or_later", "type": "float", "mode": "nullable", }, {"name": "housing_units", "type": "float", "mode": "nullable"}, { "name": "housing_units_renter_occupied", "type": "float", "mode": "nullable", }, {"name": "in_grades_1_to_4", "type": "float", "mode": "nullable"}, {"name": "in_grades_5_to_8", "type": "float", "mode": "nullable"}, {"name": "in_grades_9_to_12", "type": "float", "mode": "nullable"}, {"name": "in_school", "type": "float", "mode": "nullable"}, {"name": "in_undergrad_college", "type": "float", "mode": "nullable"}, {"name": "income_100000_124999", "type": "float", "mode": "nullable"}, {"name": "income_10000_14999", "type": "float", "mode": "nullable"}, {"name": "income_125000_149999", "type": "float", "mode": "nullable"}, {"name": "income_150000_199999", "type": "float", "mode": "nullable"}, {"name": "income_15000_19999", "type": "float", "mode": "nullable"}, {"name": "income_200000_or_more", "type": "float", "mode": "nullable"}, {"name": "income_20000_24999", "type": "float", "mode": "nullable"}, {"name": "income_25000_29999", "type": "float", "mode": "nullable"}, {"name": "income_30000_34999", "type": "float", "mode": "nullable"}, {"name": "income_35000_39999", "type": "float", "mode": "nullable"}, {"name": "income_40000_44999", "type": "float", "mode": "nullable"}, {"name": "income_45000_49999", "type": "float", "mode": "nullable"}, {"name": "income_50000_59999", "type": "float", "mode": "nullable"}, {"name": "income_60000_74999", "type": "float", "mode": "nullable"}, {"name": "income_75000_99999", "type": "float", "mode": "nullable"}, {"name": "income_less_10000", "type": "float", "mode": "nullable"}, {"name": "income_per_capita", "type": "float", "mode": "nullable"}, {"name": "less_one_year_college", "type": "float", "mode": "nullable"}, { "name": "less_than_high_school_graduate", "type": "float", "mode": "nullable", }, {"name": "male_10_to_14", "type": "float", "mode": "nullable"}, {"name": "male_15_to_17", "type": "float", "mode": "nullable"}, {"name": "male_18_to_19", "type": "float", "mode": "nullable"}, {"name": "male_20", "type": "float", "mode": "nullable"}, {"name": "male_21", "type": "float", "mode": "nullable"}, {"name": "male_22_to_24", "type": "float", "mode": "nullable"}, {"name": "male_25_to_29", "type": "float", "mode": "nullable"}, {"name": "male_30_to_34", "type": "float", "mode": "nullable"}, {"name": "male_35_to_39", "type": "float", "mode": "nullable"}, {"name": "male_40_to_44", "type": "float", "mode": "nullable"}, { "name": "male_45_64_associates_degree", "type": "float", "mode": "nullable", }, { "name": "male_45_64_bachelors_degree", "type": "float", "mode": "nullable", }, {"name": "male_45_64_grade_9_12", "type": "float", "mode": "nullable"}, {"name": "male_45_64_graduate_degree", "type": "float", "mode": "nullable"}, {"name": "male_45_64_high_school", "type": "float", "mode": "nullable"}, { "name": "male_45_64_less_than_9_grade", "type": "float", "mode": "nullable", }, {"name": "male_45_64_some_college", "type": "float", "mode": "nullable"}, {"name": "male_45_to_49", "type": "float", "mode": "nullable"}, {"name": "male_45_to_64", "type": "float", "mode": "nullable"}, {"name": "male_50_to_54", "type": "float", "mode": "nullable"}, {"name": "male_55_to_59", "type": "float", "mode": "nullable"}, 
{"name": "male_5_to_9", "type": "float", "mode": "nullable"}, {"name": "male_60_to_61", "type": "float", "mode": "nullable"}, {"name": "male_62_to_64", "type": "float", "mode": "nullable"}, {"name": "male_65_to_66", "type": "float", "mode": "nullable"}, {"name": "male_67_to_69", "type": "float", "mode": "nullable"}, {"name": "male_70_to_74", "type": "float", "mode": "nullable"}, {"name": "male_75_to_79", "type": "float", "mode": "nullable"}, {"name": "male_80_to_84", "type": "float", "mode": "nullable"}, {"name": "male_85_and_over", "type": "float", "mode": "nullable"}, {"name": "male_male_households", "type": "float", "mode": "nullable"}, {"name": "male_pop", "type": "float", "mode": "nullable"}, {"name": "male_under_5", "type": "float", "mode": "nullable"}, { "name": "management_business_sci_arts_employed", "type": "float", "mode": "nullable", }, {"name": "married_households", "type": "float", "mode": "nullable"}, {"name": "masters_degree", "type": "float", "mode": "nullable"}, {"name": "median_age", "type": "float", "mode": "nullable"}, {"name": "median_income", "type": "float", "mode": "nullable"}, {"name": "median_rent", "type": "float", "mode": "nullable"}, { "name": "median_year_structure_built", "type": "float", "mode": "nullable", }, { "name": "million_dollar_housing_units", "type": "float", "mode": "nullable", }, {"name": "mobile_homes", "type": "float", "mode": "nullable"}, {"name": "mortgaged_housing_units", "type": "float", "mode": "nullable"}, {"name": "no_car", "type": "float", "mode": "nullable"}, {"name": "no_cars", "type": "float", "mode": "nullable"}, {"name": "nonfamily_households", "type": "float", "mode": "nullable"}, {"name": "not_hispanic_pop", "type": "float", "mode": "nullable"}, {"name": "not_in_labor_force", "type": "float", "mode": "nullable"}, {"name": "not_us_citizen_pop", "type": "float", "mode": "nullable"}, {"name": "occupation_management_arts", "type": "float", "mode": "nullable"}, { "name": "occupation_natural_resources_construction_maintenance", "type": "float", "mode": "nullable", }, { "name": "occupation_production_transportation_material", "type": "float", "mode": "nullable", }, {"name": "occupation_sales_office", "type": "float", "mode": "nullable"}, {"name": "occupation_services", "type": "float", "mode": "nullable"}, {"name": "occupied_housing_units", "type": "float", "mode": "nullable"}, {"name": "one_car", "type": "float", "mode": "nullable"}, { "name": "one_parent_families_with_young_children", "type": "float", "mode": "nullable", }, {"name": "one_year_more_college", "type": "float", "mode": "nullable"}, {"name": "other_race_pop", "type": "float", "mode": "nullable"}, { "name": "owner_occupied_housing_units", "type": "float", "mode": "nullable", }, { "name": "owner_occupied_housing_units_lower_value_quartile", "type": "float", "mode": "nullable", }, { "name": "owner_occupied_housing_units_median_value", "type": "float", "mode": "nullable", }, { "name": "owner_occupied_housing_units_upper_value_quartile", "type": "float", "mode": "nullable", }, { "name": "percent_income_spent_on_rent", "type": "float", "mode": "nullable", }, {"name": "pop_16_over", "type": "float", "mode": "nullable"}, {"name": "pop_25_64", "type": "float", "mode": "nullable"}, {"name": "pop_25_years_over", "type": "float", "mode": "nullable"}, {"name": "pop_5_years_over", "type": "float", "mode": "nullable"}, { "name": "pop_determined_poverty_status", "type": "float", "mode": "nullable", }, {"name": "pop_in_labor_force", "type": "float", "mode": "nullable"}, {"name": 
"population_1_year_and_over", "type": "float", "mode": "nullable"}, {"name": "population_3_years_over", "type": "float", "mode": "nullable"}, {"name": "poverty", "type": "float", "mode": "nullable"}, {"name": "rent_10_to_15_percent", "type": "float", "mode": "nullable"}, {"name": "rent_15_to_20_percent", "type": "float", "mode": "nullable"}, {"name": "rent_20_to_25_percent", "type": "float", "mode": "nullable"}, {"name": "rent_25_to_30_percent", "type": "float", "mode": "nullable"}, {"name": "rent_30_to_35_percent", "type": "float", "mode": "nullable"}, {"name": "rent_35_to_40_percent", "type": "float", "mode": "nullable"}, {"name": "rent_40_to_50_percent", "type": "float", "mode": "nullable"}, {"name": "rent_burden_not_computed", "type": "float", "mode": "nullable"}, {"name": "rent_over_50_percent", "type": "float", "mode": "nullable"}, {"name": "rent_under_10_percent", "type": "float", "mode": "nullable"}, { "name": "renter_occupied_housing_units_paying_cash_median_gross_rent", "type": "float", "mode": "nullable", }, {"name": "sales_office_employed", "type": "float", "mode": "nullable"}, { "name": "some_college_and_associates_degree", "type": "float", "mode": "nullable", }, {"name": "speak_only_english_at_home", "type": "float", "mode": "nullable"}, {"name": "speak_spanish_at_home", "type": "float", "mode": "nullable"}, { "name": "speak_spanish_at_home_low_english", "type": "float", "mode": "nullable", }, {"name": "three_cars", "type": "float", "mode": "nullable"}, {"name": "total_pop", "type": "float", "mode": "nullable"}, {"name": "two_cars", "type": "float", "mode": "nullable"}, {"name": "two_or_more_races_pop", "type": "float", "mode": "nullable"}, { "name": "two_parent_families_with_young_children", "type": "float", "mode": "nullable", }, { "name": "two_parents_father_in_labor_force_families_with_young_children", "type": "float", "mode": "nullable", }, { "name": "two_parents_in_labor_force_families_with_young_children", "type": "float", "mode": "nullable", }, { "name": "two_parents_mother_in_labor_force_families_with_young_children", "type": "float", "mode": "nullable", }, { "name": "two_parents_not_in_labor_force_families_with_young_children", "type": "float", "mode": "nullable", }, {"name": "unemployed_pop", "type": "float", "mode": "nullable"}, {"name": "vacant_housing_units", "type": "float", "mode": "nullable"}, { "name": "vacant_housing_units_for_rent", "type": "float", "mode": "nullable", }, { "name": "vacant_housing_units_for_sale", "type": "float", "mode": "nullable", }, {"name": "walked_to_work", "type": "float", "mode": "nullable"}, {"name": "white_including_hispanic", "type": "float", "mode": "nullable"}, {"name": "white_male_45_54", "type": "float", "mode": "nullable"}, {"name": "white_male_55_64", "type": "float", "mode": "nullable"}, {"name": "white_pop", "type": "float", "mode": "nullable"}, {"name": "worked_at_home", "type": "float", "mode": "nullable"}, {"name": "workers_16_and_over", "type": "float", "mode": "nullable"}, ], ) transform_csv >> load_to_bq
apache-2.0
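The record above ends with an Airflow dependency declaration (transform_csv >> load_to_bq) and a BigQuery schema_fields list, but the operator definitions themselves fall outside this excerpt. A minimal sketch of the pattern such a pipeline typically follows, assuming the Google provider's GCSToBigQueryOperator; the DAG id, bucket, object path and destination table below are placeholders, not values from the source DAG:

from airflow import DAG
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.utils.dates import days_ago

SCHEMA_FIELDS = [
    {"name": "total_pop", "type": "float", "mode": "nullable"},
    {"name": "median_income", "type": "float", "mode": "nullable"},
    # ... remaining columns exactly as listed in the record above
]

with DAG(dag_id="census_example", start_date=days_ago(1), schedule_interval=None) as dag:
    load_to_bq = GCSToBigQueryOperator(
        task_id="load_to_bq",
        bucket="example-bucket",                                            # placeholder
        source_objects=["census/transformed.csv"],                          # placeholder
        destination_project_dataset_table="project.dataset.census_table",   # placeholder
        schema_fields=SCHEMA_FIELDS,
        source_format="CSV",
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE",
    )
    # A preceding transform task would be chained here, e.g. transform_csv >> load_to_bq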
rhythmsosad/polyglot
polyglot/text.py
5
17075
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import numpy as np from polyglot.base import Sequence, TextFile, TextFiles from polyglot.detect import Detector, Language from polyglot.decorators import cached_property from polyglot.downloader import Downloader from polyglot.load import load_embeddings, load_morfessor_model from polyglot.mapping import CountedVocabulary from polyglot.mixins import BlobComparableMixin, StringlikeMixin from polyglot.tag import get_pos_tagger, get_ner_tagger from polyglot.tokenize import SentenceTokenizer, WordTokenizer from polyglot.transliteration import Transliterator from polyglot.utils import _print from .mixins import basestring import six from six import text_type as unicode class BaseBlob(StringlikeMixin, BlobComparableMixin): """An abstract base class that Sentence, Text will inherit from. Includes words, POS tag, NP, and word count properties. Also includes basic dunder and string methods for making objects like Python strings. :param text: A string. """ def __init__(self, text): if not isinstance(text, basestring): raise TypeError('The `text` argument passed to `__init__(text)` ' 'must be a unicode string, not {0}'.format(type(text))) self.raw = text if not isinstance(text, unicode): self.raw = text.decode("utf-8") self.string = self.raw self.__lang = None @cached_property def detected_languages(self): return Detector(self.raw, quiet=True) @property def language(self): if self.__lang is None: self.__lang = self.detected_languages.language return self.__lang @language.setter def language(self, value): self.__lang = Language.from_code(value) @property def word_tokenizer(self): word_tokenizer = WordTokenizer(locale=self.language.code) return word_tokenizer @property def words(self): """Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens. """ return self.tokens @cached_property def tokens(self): """Return a list of tokens, using this blob's tokenizer object (defaults to :class:`WordTokenizer <textblob.tokenizers.WordTokenizer>`). """ seq = self.word_tokenizer.transform(Sequence(self.raw)) return WordList(seq.tokens(), parent=self, language=self.language.code) def tokenize(self, tokenizer=None): """Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer. 
""" t = tokenizer if tokenizer is not None else self.tokenizer return WordList(t.tokenize(self.raw), parent=self) @cached_property def polarity(self): """Return the polarity score as a float within the range [-1.0, 1.0] """ scores = [w.polarity for w in self.words if w.polarity != 0] return sum(scores) / float(len(scores)) @cached_property def ne_chunker(self): return get_ner_tagger(lang=self.language.code) @cached_property def pos_tagger(self): return get_pos_tagger(lang=self.language.code) @cached_property def morpheme_analyzer(self): return load_morfessor_model(lang=self.language.code) def transliterate(self, target_language="en"): """Transliterate the string to the target language.""" return WordList([w.transliterate(target_language) for w in self.words], language=target_language, parent=self) @cached_property def morphemes(self): words, score = self.morpheme_analyzer.viterbi_segment(self.raw) return WordList(words, language=self.language.code, parent=self) @cached_property def entities(self): """Returns a list of entities for this blob.""" start = 0 end = 0 prev_tag = u'O' chunks = [] for i, (w, tag) in enumerate(self.ne_chunker.annotate(self.words)): if tag != prev_tag: if prev_tag == u'O': start = i else: chunks.append(Chunk(self.words[start: i], start, i, tag=prev_tag, parent=self)) prev_tag = tag if tag != u'O': chunks.append(Chunk(self.words[start: i+1], start, i+1, tag=tag, parent=self)) return chunks @cached_property def pos_tags(self): """Returns an list of tuples of the form (word, POS tag). Example: :: [('At', 'ADP'), ('eight', 'NUM'), ("o'clock", 'NOUN'), ('on', 'ADP'), ('Thursday', 'NOUN'), ('morning', 'NOUN')] :rtype: list of tuples """ tagged_words = [] for word,t in self.pos_tagger.annotate(self.words): word.pos_tag = t tagged_words.append((word, t)) return tagged_words @cached_property def word_counts(self): """Dictionary of word frequencies in this text. """ counts = defaultdict(int) for word in self.words: counts[word] += 1 return counts @cached_property def np_counts(self): """Dictionary of noun phrase frequencies in this text. """ counts = defaultdict(int) for phrase in self.noun_phrases: counts[phrase] += 1 return counts def ngrams(self, n=3): """Return a list of n-grams (tuples of n successive words) for this blob. :rtype: List of :class:`WordLists <WordList>` """ if n <= 0: return [] grams = [WordList(self.words[i:i+n], parent=self) for i in range(len(self.words) - n + 1)] return grams def detect_language(self): """Detect the blob's language using the Google Translate API. Requires an internet connection. Usage: :: >>> b = Text("bonjour") >>> b.language u'fr' """ return self.language.code def correct(self): """Attempt to correct the spelling of a blob. .. versionadded:: 0.6.0 :rtype: :class:`BaseBlob <BaseBlob>` """ # regex matches: contraction or word or punctuation or whitespace tokens = nltk.tokenize.regexp_tokenize(self.raw, "\w*('\w*)+|\w+|[^\w\s]|\s") corrected = (Word(w).correct() for w in tokens) ret = ''.join(corrected) return self.__class__(ret) def _cmpkey(self): """Key used by ComparableMixin to implement all rich comparison operators. """ return self.raw def _strkey(self): """Key used by StringlikeMixin to implement string methods.""" return self.raw def __hash__(self): return hash(self._cmpkey()) def __add__(self, other): '''Concatenates two text objects the same way Python strings are concatenated. 
Arguments: - `other`: a string or a text object ''' if isinstance(other, basestring): return self.__class__(self.raw + other) elif isinstance(other, BaseBlob): return self.__class__(self.raw + other.raw) else: raise TypeError('Operands must be either strings or {0} objects' .format(self.__class__.__name__)) def split(self, sep=None, maxsplit=sys.maxsize): """Behaves like the built-in str.split() except returns a WordList. :rtype: :class:`WordList <WordList>` """ return WordList(self._strkey().split(sep, maxsplit), parent=self) class Word(unicode): """A simple word representation. Includes methods for inflection, translation, and WordNet integration. """ def __new__(cls, string, language=None, pos_tag=None): """Return a new instance of the class. It is necessary to override this method in order to handle the extra pos_tag argument in the constructor. """ return super(Word, cls).__new__(cls, string) def __init__(self, string, language=None, pos_tag=None): self.string = string self.pos_tag = pos_tag self.__lang = language def __repr__(self): return repr(self.string) def __str__(self): return self.string @cached_property def morpheme_analyzer(self): return load_morfessor_model(lang=self.language) @cached_property def morphemes(self): words, score = self.morpheme_analyzer.viterbi_segment(self.string) return WordList(words, parent=self, language=self.language) @cached_property def detected_languages(self): return Detector(self.string, quiet=True) @property def language(self): if self.__lang is None: self.__lang = self.detected_languages.language.code return self.__lang @language.setter def language(self, value): self.__lang = value @property def vector(self): embeddings = load_embeddings(lang=self.language, type="sgns", task="embeddings") return embeddings[self.string] @property def neighbors(self): embeddings = load_embeddings(lang=self.language, type="sgns", task="embeddings") return embeddings.nearest_neighbors(self.string) @property def polarity(self): embeddings = load_embeddings(lang=self.language, type="", task="sentiment") return embeddings.get(self.string, [0])[0] def detect_language(self): """Detect the word's language.""" return self.language def transliterate(self, target_language="en"): """Transliterate the string to the target language.""" t = Transliterator(source_lang=self.language, target_lang=target_language) return t.transliterate(self.string) class WordList(list): """A list-like collection of words.""" def __init__(self, collection, parent=None, language="en"): """Initialize a WordList. Takes a collection of strings as its only argument. """ self._collection = [Word(w, language=language) for w in collection] self.parent = parent super(WordList, self).__init__(self._collection) def __str__(self): return str(self._collection) def __repr__(self): """Returns a string representation for debugging.""" class_name = self.__class__.__name__ return '{cls}({lst})'.format(cls=class_name, lst=repr(self._collection)) def __getitem__(self, key): """Returns a string at the given index.""" if isinstance(key, slice): return self.__class__(self._collection[key]) else: return self._collection[key] def __getslice__(self, i, j): # This is included for Python 2.* compatibility return self.__class__(self._collection[i:j]) def __iter__(self): return iter(self._collection) def count(self, strg, case_sensitive=False, *args, **kwargs): """Get the count of a word or phrase `s` within this WordList. :param strg: The string to count. 
:param case_sensitive: A boolean, whether or not the search is case-sensitive. """ if not case_sensitive: return [word.lower() for word in self].count(strg.lower(), *args, **kwargs) return self._collection.count(strg, *args, **kwargs) def append(self, obj): """Append an object to end. If the object is a string, appends a :class:`Word <Word>` object. """ if isinstance(obj, basestring): return self._collection.append(Word(obj)) else: return self._collection.append(obj) def extend(self, iterable): """Extend WordList by appending elements from ``iterable``. If an element is a string, appends a :class:`Word <Word>` object. """ [self._collection.append(Word(e) if isinstance(e, basestring) else e) for e in iterable] return self def upper(self): """Return a new WordList with each word upper-cased.""" return self.__class__([word.upper() for word in self]) def lower(self): """Return a new WordList with each word lower-cased.""" return self.__class__([word.lower() for word in self]) class Chunk(WordList): """A subsequence within a WordList object. Inherits from :class:`WordList <WordList>`. :param subsequence: A list, the raw sentence. :param start_index: An int, the index where this chunk begins in WordList. If not given, defaults to 0. :param end_index: An int, the index where this chunk ends in a WordList. If not given, defaults to the length of the sentence - 1. :param parent: Original Baseblob. """ def __init__(self, subsequence, start_index=0, end_index=None, tag="", parent=None): super(Chunk, self).__init__(collection=subsequence, parent=parent) #: The start index within a Text self.start = start_index #: The end index within a Text self.end = end_index or len(sentence) - 1 class_name = self.__class__.__name__ self.tag = tag if tag else class_name def __repr__(self): """Returns a string representation for debugging.""" return '{tag}({lst})'.format(tag=self.tag, lst=repr(self._collection)) @cached_property def positive_sentiment(self): """Positive sentiment of the entity.""" pos, neg = self._sentiment() return pos @cached_property def negative_sentiment(self): """Negative sentiment of the entity.""" pos, neg = self._sentiment() return neg def _sentiment(self, distance=True): """Calculates the sentiment of an entity as it appears in text.""" sum_pos = 0 sum_neg = 0 text = self.parent entity_positions = range(self.start, self.end) non_entity_positions = set(range(len(text.words))).difference(entity_positions) if not distance: non_entity_polarities = np.array([text.words[i].polarity for i in non_entity_positions]) sum_pos = sum(non_entity_polarities == 1) sum_neg = sum(non_entity_polarities == -1) else: polarities = np.array([w.polarity for w in text.words]) polarized_positions = np.argwhere(polarities != 0)[0] polarized_non_entity_positions = non_entity_positions.intersection(polarized_positions) sentence_len = len(text.words) for i in polarized_non_entity_positions: min_dist = min(abs(self.start - i), abs(self.end - i)) if text.words[i].polarity == 1: sum_pos += 1.0 - (min_dist - 1.0) / (2.0 * sentence_len) else: sum_neg += 1.0 - (min_dist - 1.0) / (2.0 *sentence_len) return (sum_pos, sum_neg) class Sentence(BaseBlob): """A sentence within a Text object. Inherits from :class:`BaseBlob <BaseBlob>`. :param sentence: A string, the raw sentence. :param start_index: An int, the index where this sentence begins in Text. If not given, defaults to 0. :param end_index: An int, the index where this sentence ends in a Text. If not given, defaults to the length of the sentence - 1. 
""" def __init__(self, sentence, start_index=0, end_index=None): super(Sentence, self).__init__(sentence) #: The start index within a Text self.start = start_index #: The end index within a Text self.end = end_index or len(sentence) - 1 @property def dict(self): '''The dict representation of this sentence.''' return { 'raw': self.raw, 'start_index': self.start_index, 'end_index': self.end_index, 'entities': self.entities, 'polarity': self.polarity, } class Text(BaseBlob): """. """ def __init__(self, text): super(Text, self).__init__(text) def __str__(self): if len(self.raw) > 1000: return u"{}...{}".format(self.raw[:500], self.raw[-500:]) else: return self.raw @property def sentences(self): """Return list of :class:`Sentence <Sentence>` objects.""" return self._create_sentence_objects() @property def raw_sentences(self): """List of strings, the raw sentences in the blob.""" return [sentence.raw for sentence in self.sentences] @property def serialized(self): """Returns a list of each sentence's dict representation.""" return [sentence.dict for sentence in self.sentences] def to_json(self, *args, **kwargs): '''Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 ''' return json.dumps(self.serialized, *args, **kwargs) @property def json(self): '''The json representation of this blob. .. versionchanged:: 0.5.1 Made ``json`` a property instead of a method to restore backwards compatibility that was broken after version 0.4.0. ''' return self.to_json() def _create_sentence_objects(self): '''Returns a list of Sentence objects from the raw text. ''' sentence_objects = [] sent_tokenizer = SentenceTokenizer(locale=self.language.code) seq = Sequence(self.raw) seq = sent_tokenizer.transform(seq) for start_index, end_index in zip(seq.idx[:-1], seq.idx[1:]): # Sentences share the same models as their parent blob sent = seq.text[start_index: end_index].strip() if not sent: continue s = Sentence(sent, start_index=start_index, end_index=end_index) s.detected_languages = self.detected_languages sentence_objects.append(s) return sentence_objects
gpl-3.0
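polyglot/text.py above defines a TextBlob-style API (Text, Sentence, Word, WordList, Chunk). A short usage sketch, assuming the polyglot package and its tokenizer data are installed; language detection works from the bundled pycld2 detector, while pos_tags, entities and word vectors additionally require per-language models fetched with the polyglot downloader:

# -*- coding: utf-8 -*-
from polyglot.text import Text

blob = Text(u"Bonjour le monde. Ceci est une phrase de test.")
print(blob.language.code)        # detected language code, here 'fr'
print(blob.words)                # WordList of Word tokens
print(len(blob.sentences))       # Sentence objects carrying start/end indices

# Tagging and NER need downloaded models first, e.g.
#   polyglot download embeddings2.fr pos2.fr ner2.fr
# print(blob.pos_tags)
# print(blob.entities)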
mclaughlin6464/pylearn2
pylearn2/models/svm.py
6
3259
"""Wrappers for SVM models.""" __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import numpy as np import warnings try: from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC except ImportError: warnings.warn("Could not import sklearn.") class OneVsRestClassifier(object): """ See `sklearn.multiclass.OneVsRestClassifier`. Notes ----- This class is a dummy class included so that sphinx can import DenseMulticlassSVM and document it even when sklearn is not installed. """ def __init__(self, estimator): raise RuntimeError("sklearn not available.") class DenseMulticlassSVM(OneVsRestClassifier): """ sklearn does very different things behind the scenes depending upon the exact identity of the class you use. The only way to get an SVM implementation that works with dense data is to use the `SVC` class, which implements one-against-one classification. This wrapper uses it to implement one-against- rest classification, which generally works better in my experiments. To avoid duplicating the training data, use only numpy ndarrays whose tags.c_contigous flag is true, and which are in float64 format. Parameters ---------- C : float SVM regularization parameter. See SVC.__init__ for details. kernel : str Type of kernel to use. See SVC.__init__ for details. gamma : float Optional parameter of kernel. See SVC.__init__ for details. coef0 : float Optional parameter of kernel. See SVC.__init__ for details. degree : int Degree of kernel, if kernel is polynomial. See SVC.__init__ for details. """ def __init__(self, C, kernel='rbf', gamma=1.0, coef0=1.0, degree=3): estimator = SVC(C=C, kernel=kernel, gamma=gamma, coef0=coef0, degree=degree) super(DenseMulticlassSVM, self).__init__(estimator) def fit(self, X, y): """ Fit underlying estimators. Parameters ---------- X : array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] or [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. Returns ------- self """ super(DenseMulticlassSVM, self).fit(X, y) return self def decision_function(self, X): """ Returns the distance of each sample from the decision boundary for each class. Parameters ---------- X : array-like, shape = [n_samples, n_features] A 2D ndarray with each row containing the input features for one example. Returns ------- T : array-like, shape = [n_samples, n_classes] """ return np.column_stack([estimator.decision_function(X) for estimator in self.estimators_])
bsd-3-clause
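DenseMulticlassSVM's docstring explains that sklearn's SVC is one-against-one internally and that wrapping it in OneVsRestClassifier yields one-against-rest classification on dense data. A standalone sketch of that same wrapping with plain scikit-learn (iris is used only as convenient dense float64 data):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC

# Dense, C-contiguous float64 input, as the wrapper's notes recommend.
X, y = load_iris(return_X_y=True)
X = np.ascontiguousarray(X, dtype=np.float64)

# SVC alone trains one-vs-one pairs; the wrapper turns it into one-vs-rest.
clf = OneVsRestClassifier(SVC(C=1.0, kernel="rbf", gamma=1.0, coef0=1.0, degree=3))
clf.fit(X, y)

print(clf.predict(X[:5]))
print(clf.decision_function(X[:5]).shape)   # (5, n_classes), one column per class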
tobegit3hub/deep_cnn
java_predict_client/src/main/proto/tensorflow/examples/learn/boston.py
25
1932
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of DNNRegressor for Housing dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from sklearn import cross_validation from sklearn import metrics from sklearn import preprocessing import tensorflow as tf from tensorflow.contrib import learn def main(unused_argv): # Load dataset boston = learn.datasets.load_dataset('boston') x, y = boston.data, boston.target # Split dataset into train / test x_train, x_test, y_train, y_test = cross_validation.train_test_split( x, y, test_size=0.2, random_state=42) # Scale data (training set) to 0 mean and unit standard deviation. scaler = preprocessing.StandardScaler() x_train = scaler.fit_transform(x_train) # Build 2 layer fully connected DNN with 10, 10 units respectively. feature_columns = learn.infer_real_valued_columns_from_input(x_train) regressor = learn.DNNRegressor( feature_columns=feature_columns, hidden_units=[10, 10]) # Fit regressor.fit(x_train, y_train, steps=5000, batch_size=1) # Predict and score y_predicted = list( regressor.predict(scaler.transform(x_test), as_iterable=True)) score = metrics.mean_squared_error(y_predicted, y_test) print('MSE: {0:f}'.format(score)) if __name__ == '__main__': tf.app.run()
apache-2.0
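boston.py targets APIs that no longer exist in current releases (tf.contrib.learn was removed in TensorFlow 2.x, sklearn.cross_validation in scikit-learn 0.20, and the Boston housing data is no longer shipped with scikit-learn). A rough modern sketch of the same split / scale / fit / score pattern, substituting MLPRegressor for DNNRegressor and the California housing data for Boston:

import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler

X, y = fetch_california_housing(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

# Scale to zero mean / unit variance, fitting the scaler on the training set only.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Two hidden layers of 10 units, loosely mirroring the original DNNRegressor.
regressor = MLPRegressor(hidden_layer_sizes=(10, 10), max_iter=1000, random_state=42)
regressor.fit(X_train, y_train)
print('MSE: {0:f}'.format(mean_squared_error(y_test, regressor.predict(X_test))))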
yzl0083/orange
Orange/testing/unit/tests/test_logreg.py
6
2374
from Orange.testing import testing try: import unittest2 as unittest except: import unittest from orngLR import LogRegLearner, Univariate_LogRegLearner, StepWiseFSS, StepWiseFSS_Filter from Orange.classification.logreg import LibLinearLogRegLearner, dump import Orange def datasets_iter(): for name, (data,) in testing.datasets_iter(testing.CLASSIFICATION_DATASETS): if len(data.domain.class_var.values) == 2: yield name, (data,) @testing.data_driven(data_iter=datasets_iter()) class TestLogRegLearner(testing.LearnerTestCase): LEARNER = LogRegLearner @testing.test_on_data def test_learner_on(self, dataset): """ Test LogRegLearner. """ if len(dataset) < len(dataset.domain): raise unittest.SkipTest("Not enough examples") testing.LearnerTestCase.test_learner_on(self, dataset) @testing.data_driven(data_iter=datasets_iter()) class TestStepWiseFSS(unittest.TestCase): @testing.test_on_data def test_stepwise_fss_on(self, dataset): """ Test StepWiseFSS. """ if len(dataset) < len(dataset.domain): raise unittest.SkipTest("No enough examples") attrs = StepWiseFSS(dataset) new_dataset = StepWiseFSS_Filter(dataset) self.assertTrue([a1 == a2 for a1, a2 in zip(attrs, new_dataset.domain.attributes)]) @testing.datasets_driven(datasets=testing.CLASSIFICATION_DATASETS) class TestLibLinearLogRegLearner(testing.LearnerTestCase): LEARNER = LibLinearLogRegLearner @testing.test_on_data def test_learner_on(self, dataset): """ Test LibLinearLogRegLearner. """ testing.LearnerTestCase.test_learner_on(self, dataset) class TestUtils(unittest.TestCase): def test_dump(self): """Test for dump() failing (OverflowError: math range error on math.exp) on classifiers with high beta""" quality = Orange.feature.Discrete('quality') quality.add_value('low') quality.add_value('high') price = Orange.feature.Continuous('price') variables = [price, quality] matrix = [[0.01, 'high'], [0.001, 'low']] domain = Orange.data.Domain(variables) data = Orange.data.Table(domain, matrix) classifier = LogRegLearner(data) text_dump = dump(classifier) if __name__ == "__main__": unittest.main()
gpl-3.0
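TestUtils.test_dump above guards against dump() raising "OverflowError: math range error" when a classifier's beta coefficients are very large, because math.exp overflows for arguments beyond roughly 709. A self-contained illustration of that failure mode and of the usual guarded sigmoid formulation (illustrative only, not Orange's actual fix):

import math

def sigmoid_naive(z):
    # Overflows for large negative z, since math.exp(-z) exceeds the float range.
    return 1.0 / (1.0 + math.exp(-z))

def sigmoid_stable(z):
    # Only ever calls exp() on a non-positive argument, so it cannot overflow.
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    e = math.exp(z)
    return e / (1.0 + e)

print(sigmoid_stable(-1000.0))       # 0.0, no exception
try:
    sigmoid_naive(-1000.0)
except OverflowError as err:
    print("naive form fails:", err)  # math range error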
mfjb/scikit-learn
benchmarks/bench_mnist.py
153
6006
""" ======================= MNIST dataset benchmark ======================= Benchmark on the MNIST dataset. The dataset comprises 70,000 samples and 784 features. Here, we consider the task of predicting 10 classes - digits from 0 to 9 from their raw images. By contrast to the covertype dataset, the feature space is homogenous. Example of output : [..] Classification performance: =========================== Classifier train-time test-time error-rat ------------------------------------------------------------ Nystroem-SVM 105.07s 0.91s 0.0227 ExtraTrees 48.20s 1.22s 0.0288 RandomForest 47.17s 1.21s 0.0304 SampledRBF-SVM 140.45s 0.84s 0.0486 CART 22.84s 0.16s 0.1214 dummy 0.01s 0.02s 0.8973 """ from __future__ import division, print_function # Author: Issam H. Laradji # Arnaud Joly <arnaud.v.joly@gmail.com> # License: BSD 3 clause import os from time import time import argparse import numpy as np from sklearn.datasets import fetch_mldata from sklearn.datasets import get_data_home from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.dummy import DummyClassifier from sklearn.externals.joblib import Memory from sklearn.kernel_approximation import Nystroem from sklearn.kernel_approximation import RBFSampler from sklearn.metrics import zero_one_loss from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils import check_array # Memoize the data extraction and memory map the resulting # train / test splits in readonly mode memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'), mmap_mode='r') @memory.cache def load_data(dtype=np.float32, order='F'): """Load the data, then cache and memmap the train/test split""" ###################################################################### ## Load dataset print("Loading dataset...") data = fetch_mldata('MNIST original') X = check_array(data['data'], dtype=dtype, order=order) y = data["target"] # Normalize features X = X / 255 ## Create train-test split (as [Joachims, 2006]) print("Creating train-test split...") n_train = 60000 X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] return X_train, X_test, y_train, y_test ESTIMATORS = { "dummy": DummyClassifier(), 'CART': DecisionTreeClassifier(), 'ExtraTrees': ExtraTreesClassifier(n_estimators=100), 'RandomForest': RandomForestClassifier(n_estimators=100), 'Nystroem-SVM': make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'SampledRBF-SVM': make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)) } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--classifiers', nargs="+", choices=ESTIMATORS, type=str, default=['ExtraTrees', 'Nystroem-SVM'], help="list of classifiers to benchmark.") parser.add_argument('--n-jobs', nargs="?", default=1, type=int, help="Number of concurrently running workers for " "models that support parallelism.") parser.add_argument('--order', nargs="?", default="C", type=str, choices=["F", "C"], help="Allow to choose between fortran and C ordered " "data") parser.add_argument('--random-seed', nargs="?", default=0, type=int, help="Common seed used by random number generator.") args = vars(parser.parse_args()) print(__doc__) X_train, X_test, y_train, y_test = load_data(order=args["order"]) print("") print("Dataset statistics:") print("===================") print("%s %d" % ("number of features:".ljust(25), X_train.shape[1])) 
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size)) print("%s %s" % ("data type:".ljust(25), X_train.dtype)) print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25), X_train.shape[0], int(X_train.nbytes / 1e6))) print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25), X_test.shape[0], int(X_test.nbytes / 1e6))) print() print("Training Classifiers") print("====================") error, train_time, test_time = {}, {}, {} for name in sorted(args["classifiers"]): print("Training %s ... " % name, end="") estimator = ESTIMATORS[name] estimator_params = estimator.get_params() estimator.set_params(**{p: args["random_seed"] for p in estimator_params if p.endswith("random_state")}) if "n_jobs" in estimator_params: estimator.set_params(n_jobs=args["n_jobs"]) time_start = time() estimator.fit(X_train, y_train) train_time[name] = time() - time_start time_start = time() y_pred = estimator.predict(X_test) test_time[name] = time() - time_start error[name] = zero_one_loss(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print("{0: <24} {1: >10} {2: >11} {3: >12}" "".format("Classifier ", "train-time", "test-time", "error-rate")) print("-" * 60) for name in sorted(args["classifiers"], key=error.get): print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}" "".format(name, train_time[name], test_time[name], error[name])) print()
bsd-3-clause
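bench_mnist.py loads its data through fetch_mldata('MNIST original'), which depended on the now-defunct mldata.org service. In current scikit-learn the same 70,000 x 784 dataset is normally fetched from OpenML instead; a sketch of an equivalent load_data(), assuming network access on first call:

import numpy as np
from sklearn.datasets import fetch_openml

# 'mnist_784' is the OpenML name for the same 70,000-sample, 784-feature dataset.
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
X = X.astype(np.float32) / 255.0            # same normalisation as load_data() above

# Same 60,000 / 10,000 train-test split as the benchmark.
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
print(X_train.shape, X_test.shape)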
ssaeger/scikit-learn
sklearn/decomposition/tests/test_nmf.py
12
9004
import numpy as np from scipy import linalg from sklearn.decomposition import (NMF, ProjectedGradientNMF, non_negative_factorization) from sklearn.decomposition import nmf # For testing internals from scipy.sparse import csc_matrix from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import ignore_warnings from sklearn.base import clone random_state = np.random.mtrand.RandomState(0) def test_initialize_nn_output(): # Test that initialization does not return negative values data = np.abs(random_state.randn(10, 10)) for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'): W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) assert_false((W < 0).any() or (H < 0).any()) @ignore_warnings def test_parameter_checking(): A = np.ones((2, 2)) name = 'spam' msg = "Invalid solver parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, NMF(solver=name).fit, A) msg = "Invalid init parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, NMF(init=name).fit, A) msg = "Invalid sparseness parameter: got 'spam' instead of one of" assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A) msg = "Negative values in data passed to" assert_raise_message(ValueError, msg, NMF().fit, -A) assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A, 2, 'nndsvd') clf = NMF(2, tol=0.1).fit(A) assert_raise_message(ValueError, msg, clf.transform, -A) def test_initialize_close(): # Test NNDSVD error # Test that _initialize_nmf error is less than the standard deviation of # the entries in the matrix. A = np.abs(random_state.randn(10, 10)) W, H = nmf._initialize_nmf(A, 10, init='nndsvd') error = linalg.norm(np.dot(W, H) - A) sdev = linalg.norm(A - A.mean()) assert_true(error <= sdev) def test_initialize_variants(): # Test NNDSVD variants correctness # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic # 'nndsvd' only where the basic version has zeros. 
data = np.abs(random_state.randn(10, 10)) W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd') Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda') War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar', random_state=0) for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): assert_true(np.allclose(evl[ref != 0], ref[ref != 0])) @ignore_warnings def test_nmf_fit_nn_output(): # Test that the decomposition does not contain negative values A = np.c_[5 * np.ones(5) - np.arange(1, 6), 5 * np.ones(5) + np.arange(1, 6)] for solver in ('pg', 'cd'): for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'): model = NMF(n_components=2, solver=solver, init=init, random_state=0) transf = model.fit_transform(A) assert_false((model.components_ < 0).any() or (transf < 0).any()) @ignore_warnings def test_nmf_fit_close(): # Test that the fit is not too far away for solver in ('pg', 'cd'): pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0) X = np.abs(random_state.randn(6, 5)) assert_less(pnmf.fit(X).reconstruction_err_, 0.05) def test_nls_nn_output(): # Test that NLS solver doesn't return negative values A = np.arange(1, 5).reshape(1, -1) Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100) assert_false((Ap < 0).any()) def test_nls_close(): # Test that the NLS results should be close A = np.arange(1, 5).reshape(1, -1) Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A), 0.001, 100) assert_true((np.abs(Ap - A) < 0.01).all()) @ignore_warnings def test_nmf_transform(): # Test that NMF.transform returns close values A = np.abs(random_state.randn(6, 5)) for solver in ('pg', 'cd'): m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0) ft = m.fit_transform(A) t = m.transform(A) assert_array_almost_equal(ft, t, decimal=2) @ignore_warnings def test_nmf_inverse_transform(): # Test that NMF.inverse_transform returns close values random_state = np.random.RandomState(0) A = np.abs(random_state.randn(6, 4)) for solver in ('pg', 'cd'): m = NMF(solver=solver, n_components=4, init='random', random_state=0) ft = m.fit_transform(A) t = m.transform(A) A_new = m.inverse_transform(t) assert_array_almost_equal(A, A_new, decimal=2) @ignore_warnings def test_n_components_greater_n_features(): # Smoke test for the case of more components than features. A = np.abs(random_state.randn(30, 10)) NMF(n_components=15, random_state=0, tol=1e-2).fit(A) @ignore_warnings def test_projgrad_nmf_sparseness(): # Test sparseness # Test that sparsity constraints actually increase sparseness in the # part where they are applied. 
tol = 1e-2 A = np.abs(random_state.randn(10, 10)) m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A) data_sp = ProjectedGradientNMF(n_components=5, sparseness='data', random_state=0, tol=tol).fit(A).data_sparseness_ comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components', random_state=0, tol=tol).fit(A).comp_sparseness_ assert_greater(data_sp, m.data_sparseness_) assert_greater(comp_sp, m.comp_sparseness_) @ignore_warnings def test_sparse_input(): # Test that sparse matrices are accepted as input from scipy.sparse import csc_matrix A = np.abs(random_state.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 A_sparse = csc_matrix(A) for solver in ('pg', 'cd'): est1 = NMF(solver=solver, n_components=5, init='random', random_state=0, tol=1e-2) est2 = clone(est1) W1 = est1.fit_transform(A) W2 = est2.fit_transform(A_sparse) H1 = est1.components_ H2 = est2.components_ assert_array_almost_equal(W1, W2) assert_array_almost_equal(H1, H2) @ignore_warnings def test_sparse_transform(): # Test that transform works on sparse data. Issue #2124 A = np.abs(random_state.randn(3, 2)) A[A > 1.0] = 0 A = csc_matrix(A) for solver in ('pg', 'cd'): model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2) A_fit_tr = model.fit_transform(A) A_tr = model.transform(A) assert_array_almost_equal(A_fit_tr, A_tr, decimal=1) @ignore_warnings def test_non_negative_factorization_consistency(): # Test that the function is called in the same way, either directly # or through the NMF class A = np.abs(random_state.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 for solver in ('pg', 'cd'): W_nmf, H, _ = non_negative_factorization( A, solver=solver, random_state=1, tol=1e-2) W_nmf_2, _, _ = non_negative_factorization( A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2) model_class = NMF(solver=solver, random_state=1, tol=1e-2) W_cls = model_class.fit_transform(A) W_cls_2 = model_class.transform(A) assert_array_almost_equal(W_nmf, W_cls, decimal=10) assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10) @ignore_warnings def test_non_negative_factorization_checking(): A = np.ones((2, 2)) # Test parameters checking is public function nnmf = non_negative_factorization msg = "Number of components must be positive; got (n_components='2')" assert_raise_message(ValueError, msg, nnmf, A, A, A, '2') msg = "Negative values in data passed to NMF (input H)" assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom') msg = "Negative values in data passed to NMF (input W)" assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom') msg = "Array passed to NMF (input H) is full of zeros" assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom') def test_safe_compute_error(): A = np.abs(random_state.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 A_sparse = csc_matrix(A) W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0) error = nmf._safe_compute_error(A, W, H) error_sparse = nmf._safe_compute_error(A_sparse, W, H) assert_almost_equal(error, error_sparse)
bsd-3-clause
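The NMF tests above mostly exercise private helpers (nmf._initialize_nmf, nmf._nls_subproblem); the public-API behaviour they pin down — non-negative factors and a small reconstruction error with NNDSVD initialisation — can be reproduced with a short sketch against current scikit-learn:

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.RandomState(0)
A = np.abs(rng.randn(6, 5))                 # NMF requires non-negative input

model = NMF(n_components=4, init="nndsvd", random_state=0, max_iter=500)
W = model.fit_transform(A)
H = model.components_

assert (W >= 0).all() and (H >= 0).all()    # factors stay non-negative
print("reconstruction error:", model.reconstruction_err_)
print("residual norm:", np.linalg.norm(A - W @ H))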
jballanc/openmicroscopy
components/tools/OmeroPy/test/integration/test_icontainer.py
4
21029
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Integration test focused on the omero.api.IContainer interface a running server. Copyright 2008-2013 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt """ import test.integration.library as lib import pytest import omero from omero_model_PixelsI import PixelsI from omero_model_ImageI import ImageI from omero_model_DatasetI import DatasetI from omero_model_ExperimenterI import ExperimenterI from omero_model_ExperimenterGroupI import ExperimenterGroupI from omero_model_GroupExperimenterMapI import GroupExperimenterMapI from omero_model_DatasetImageLinkI import DatasetImageLinkI from omero_model_ImageAnnotationLinkI import ImageAnnotationLinkI from omero_model_CommentAnnotationI import CommentAnnotationI from omero.rtypes import rstring, rtime from uuid import uuid4 class TestIContainer(lib.ITest): def testFindAnnotations(self): ipojo = self.client.sf.getContainerService() i = ImageI() i.setName(rstring("name")) i.setAcquisitionDate(rtime(0)) i = ipojo.createDataObject(i,None) def testFindAndCountAnnotationsForSharedData(self): uuid = self.root.sf.getAdminService().getEventContext().sessionUuid query = self.root.sf.getQueryService() update = self.root.sf.getUpdateService() admin = self.root.sf.getAdminService() ipojo = self.root.sf.getContainerService() ### create new users #group1 new_gr1 = ExperimenterGroupI() new_gr1.name = rstring("group1_%s" % uuid) gid = admin.createGroup(new_gr1) #new user1 new_exp = ExperimenterI() new_exp.omeName = rstring("user1_%s" % uuid) new_exp.firstName = rstring("New") new_exp.lastName = rstring("Test") defaultGroup = admin.getGroup(gid) listOfGroups = list() listOfGroups.append(admin.lookupGroup("user")) eid = admin.createExperimenterWithPassword(new_exp, rstring("ome"), defaultGroup, listOfGroups) #new user2 new_exp2 = ExperimenterI() new_exp2.omeName = rstring("user2_%s" % uuid) new_exp2.firstName = rstring("New2") new_exp2.lastName = rstring("Test2") defaultGroup = admin.getGroup(gid) listOfGroups = list() listOfGroups.append(admin.lookupGroup("user")) eid2 = admin.createExperimenterWithPassword(new_exp2, rstring("ome"), defaultGroup, listOfGroups) ## get users user1 = admin.getExperimenter(eid) user2 = admin.getExperimenter(eid2) ## login as user1 cl1 = self.new_client(user=user1, password="ome") update1 = cl1.sf.getUpdateService() ipojo1 = cl1.sf.getContainerService() # create image img = ImageI() img.setName(rstring('test1154-img-%s' % (uuid))) img.setAcquisitionDate(rtime(0)) # default permission 'rw----': img = update1.saveAndReturnObject(img) img.unload() ann1 = CommentAnnotationI() ann1.textValue = rstring("user comment - %s" % uuid) l_ann1 = ImageAnnotationLinkI() l_ann1.setParent(img) l_ann1.setChild(ann1) update1.saveObject(l_ann1) #user retrives the annotations for image coll_count = ipojo1.getCollectionCount("Image", "ome.model.containers.Image_annotationLinks", [img.id.val], None) assert 1 == coll_count.get(img.id.val, []) #assert 1 == len(ipojo1.findAnnotations("Image", [img.id.val], None, None).get(img.id.val, [])) ## login as user2 cl2 = self.new_client(user=user2, password="ome") update2 = cl1.sf.getUpdateService() ann = CommentAnnotationI() ann.textValue = rstring("user2 comment - %s" % uuid) l_ann = ImageAnnotationLinkI() l_ann.setParent(img) l_ann.setChild(ann) update2.saveObject(l_ann) #do they see the same vals? 
#print ipojo1.getCollectionCount("Image", "ome.model.containers.Image_annotationLinks", [img.id.val], None) #print ipojo.getCollectionCount("Image", "ome.model.containers.Image_annotationLinks", [img.id.val], None) #print len(ipojo1.findAnnotations("Image", [img.id.val], None, None).get(img.id.val, [])) #print len(ipojo.findAnnotations("Image", [img.id.val], None, None).get(img.id.val, [])) coll_count = ipojo1.getCollectionCount("Image", "ome.model.containers.Image_annotationLinks", [img.id.val], None) assert 2 == coll_count.get(img.id.val, []) #anns = ipojo1.findAnnotations("Image", [img.id.val], None, None).get(img.id.val, []) #assert 2 == len(anns) #assert anns[0].details.permissions == 'rw----' #assert anns[1].details.permissions == 'rw----' cl1.sf.closeOnDestroy() cl2.sf.closeOnDestroy() def testCreateAfterBlitzPort(self): ipojo = self.client.sf.getContainerService() i = ImageI() i.setName(rstring("name")) i.setAcquisitionDate(rtime(0)) i = ipojo.createDataObject(i,None) o = i.getDetails().owner assert -1 == o.sizeOfGroupExperimenterMap() class TestSplitFilesets(lib.ITest): def checkSplitFilesets(self, client, dtypeIdsMap, expected): """ To check we get the expected result from iContainer.getImagesBySplitFilesets() we do the query with dtype & ids and compare the returned data with the specified dict. """ container = client.sf.getContainerService() result = container.getImagesBySplitFilesets(dtypeIdsMap, None) def cmpLists(listOne, listTwo): """ Returns True if both lists have the same items """ if (len(listOne) != len(listTwo)): return False for one in listOne: if one not in listTwo: return False return True # compare result with expected... assert set(result.keys()) == set(expected.keys()), "Result should have expected Fileset IDs" for fsId, expectedDict in expected.items(): assert cmpLists(expectedDict[True], result[fsId][True]), "True ImageIDs should match" assert cmpLists(expectedDict[False], result[fsId][False]), "False ImageIDs should match" def testFilesetSplitByImage(self): """ Fileset of 2 Images, we test split using 1 Image ID """ client, user = self.new_client_and_user(perms="rw----") images = self.importMIF(2, client=client) # Lookup the fileset imgId = images[0].id.val query = client.sf.getQueryService() filesetId = query.get('Image', imgId).fileset.id.val # Define what we expect & query split fileset expected = {filesetId: {True: [imgId], False: [images[1].id.val]}} self.checkSplitFilesets(client, {'Image': [imgId]}, expected) def testFilesetNotSplitByImage(self): """ Fileset of 2 Images with No split (query with both Image IDs) """ client, user = self.new_client_and_user(perms="rw----") images = self.importMIF(2, client=client) # Lookup the fileset imgIds = [i.id.val for i in images] query = client.sf.getQueryService() filesetId = query.get('Image', imgIds[0]).fileset.id.val # Define what we expect & query split fileset expected = {} self.checkSplitFilesets(client, {'Image': imgIds}, expected) def testFilesetSplitByDatasetAndProject(self): """ Fileset of 2 Images, one in a Dataset. 
Test split using Dataset ID """ client, user = self.new_client_and_user(perms="rw----") update = client.sf.getUpdateService() query = client.sf.getQueryService() # Dataset contains 1 image of a 2-image fileset images = self.importMIF(2, client=client) ds = omero.model.DatasetI() ds.name = omero.rtypes.rstring("testFilesetSplitByDataset") ds = update.saveAndReturnObject(ds) link = omero.model.DatasetImageLinkI() link.setParent(ds.proxy()) link.setChild(images[0].proxy()) link = update.saveAndReturnObject(link) # Dataset in Project pr = omero.model.ProjectI() pr.name = omero.rtypes.rstring("testFilesetSplitByProject") pr = update.saveAndReturnObject(pr) link = omero.model.ProjectDatasetLinkI() link.setParent(pr.proxy()) link.setChild(ds.proxy()) link = update.saveAndReturnObject(link) # Lookup the fileset imgId = images[0].id.val filesetId = query.get('Image', imgId).fileset.id.val # Define what we expect & query split fileset expected = {filesetId: {True: [imgId], False: [images[1].id.val]}} self.checkSplitFilesets(client, {'Dataset': [ds.id.val]}, expected) # Expect same result if query via Project self.checkSplitFilesets(client, {'Project': [pr.id.val]}, expected) # No split if we include the extra image ID expected = {} idsMap = {'Dataset': [ds.id.val], "Image": [images[1].id.val]} self.checkSplitFilesets(client, idsMap, expected) idsMap = {'Project': [pr.id.val], "Image": [images[1].id.val]} self.checkSplitFilesets(client, idsMap, expected) def testFilesetNotSplitByDatasets(self): """ Fileset of 2 Images, both in different Datasets. Test Not split using Dataset IDs """ client, user = self.new_client_and_user(perms="rw----") update = client.sf.getUpdateService() query = client.sf.getQueryService() # Datasets each contain 1 image of a 2-image fileset datasets = self.createDatasets(2, "testFilesetNotSplitByDatasets", client=client) images = self.importMIF(2, client=client) for i in range(2): link = omero.model.DatasetImageLinkI() link.setParent(datasets[i].proxy()) link.setChild(images[i].proxy()) link = update.saveAndReturnObject(link) # Another Dataset contains both images ds = omero.model.DatasetI() ds.name = omero.rtypes.rstring("testFilesetNotSplitByDatasets") ds = update.saveAndReturnObject(ds) for i in images: link = omero.model.DatasetImageLinkI() link.setParent(ds.proxy()) link.setChild(i.proxy()) link = update.saveAndReturnObject(link) # Lookup the fileset imgId = images[0].id.val filesetId = query.get('Image', imgId).fileset.id.val # No split if we pass in both Dataset IDs... 
dsIds = [d.id.val for d in datasets] expected = {} self.checkSplitFilesets(client, {'Dataset': dsIds}, expected) # ...or the Dataset that contains both images self.checkSplitFilesets(client, {'Dataset': [ds.id.val]}, expected) # confirm split if we choose one Dataset expected = {filesetId: {True: [imgId], False: [images[1].id.val]}} self.checkSplitFilesets(client, {'Dataset': [datasets[0].id.val]}, expected) def testGetImagesBySplitFilesetsManyCases(self): query = self.client.sf.getQueryService() update = self.client.sf.getUpdateService() ipojo = self.client.sf.getContainerService() admin = self.client.sf.getAdminService() eventContext = admin.getEventContext() # entity hierarchy project_dataset_hierarchy = [(0, [0,1])] dataset_image_hierarchy = [(0, [0,1]), (1, [2,6]), (2, [3,4,5])] screen_plate_hierarchy = [(0, [0,1])] plate_well_hierarchy = [(0, [0,1]), (1, [2,6]), (2, [3,4,5])] well_image_hierarchy = [(0, [0]), (1, [1]), (2, [2]), (3, [3]), (4, [4]), (5, [5])] fileset_image_hierarchy = [(0, [0]), (1, [1,2]), (2, [3,4,5])] # test data, input and expected output value, is by list index test_cases = [({'Image': [0,1,2,3,4,5]}, {}), ({'Image': [6]}, {}), ({'Image': [0,1]}, {1: {True: [1], False: [2]}}), ({'Image': [3]}, {2: {True: [3], False: [4,5]}}), ({'Image': [5]}, {2: {True: [5], False: [3,4]}}), ({'Image': [3,4]}, {2: {True: [3,4], False: [5]}}), ({'Image': [0,1,5,6]}, {1: {True: [1], False: [2]}, 2: {True: [5], False: [3,4]}}), ({'Fileset': [0], 'Image': [0,3,4,5,6]}, {}), ({'Fileset': [0,1,2], 'Image': [0]}, {}), ({'Well': [0,1,2,3,4,5]}, {}), ({'Well': [6]}, {}), ({'Well': [0,1]}, {1: {True: [1], False: [2]}}), ({'Well': [3]}, {2: {True: [3], False: [4,5]}}), ({'Well': [5]}, {2: {True: [5], False: [3,4]}}), ({'Well': [3,4]}, {2: {True: [3,4], False: [5]}}), ({'Well': [0,1,5,6]}, {1: {True: [1], False: [2]}, 2: {True: [5], False: [3,4]}}), ({'Image': [0,1], 'Well': [5,6]}, {1: {True: [1], False: [2]}, 2: {True: [5], False: [3,4]}}), ({'Fileset': [0], 'Well': [0,3,4,5,6]}, {}), ({'Fileset': [0,1,2], 'Well': [0]}, {}), ({'Fileset': [2]}, {}), ({'Dataset': [0]}, {1: {True: [1], False: [2]}}), ({'Dataset': [1]}, {1: {True: [2], False: [1]}}), ({'Dataset': [2]}, {}), ({'Dataset': [1], 'Image': [0,1]}, {}), ({'Project': [0]}, {}), ({'Project': [0], 'Image': [3]}, {2: {True: [3], False: [4,5]}}), ({'Dataset': [0], 'Fileset': [1]}, {}), ({'Plate': [0]}, {1: {True: [1], False: [2]}}), ({'Plate': [1]}, {1: {True: [2], False: [1]}}), ({'Plate': [2]}, {}), ({'Plate': [1], 'Image': [0,1]}, {}), ({'Plate': [1], 'Well': [0,1]}, {}), ({'Plate': [1], 'Image': [0], 'Well': [1]}, {}), ({'Screen': [0]}, {}), ({'Screen': [0], 'Image': [3]}, {2: {True: [3], False: [4,5]}}), ({'Screen': [0], 'Well': [3]}, {2: {True: [3], False: [4,5]}}), ({'Screen': [0], 'Image': [3], 'Well': [3]}, {2: {True: [3], False: [4,5]}}), ({'Plate': [0], 'Fileset': [1]}, {})] # TODO: consider factoring some of the below out into library functions for use by other tests # name entity lists projects = [] datasets = [] screens = [] plates = [] wells = [] filesets = [] images = [] named_entities = {'Project': projects, 'Dataset': datasets, 'Screen': screens, 'Plate': plates, 'Well': wells, 'Fileset': filesets, 'Image': images} # note all test case input values all_inputs = {} for name in named_entities.keys(): all_inputs[name] = [] for input, expected in test_cases: for name, ids in input.items(): all_inputs[name] += ids # create test entities named in test case input values parents = lambda hierarchy: [ from_index for from_index, 
to_indices in hierarchy ] children = lambda hierarchy: sum([ to_indices for from_index, to_indices in hierarchy ], []) for project_index in set(all_inputs['Project'] + parents(project_dataset_hierarchy)): project = omero.model.ProjectI() project.name = rstring('Project #%i' % project_index) project.id = update.saveAndReturnObject(project).id projects.append(query.get('Project', project.id.val)) for dataset_index in set(all_inputs['Dataset'] + children(project_dataset_hierarchy) + parents(dataset_image_hierarchy)): dataset = omero.model.DatasetI() dataset.name = rstring('Dataset #%i' % dataset_index) dataset.id = update.saveAndReturnObject(dataset).id datasets.append(query.get('Dataset', dataset.id.val)) for screen_index in set(all_inputs['Screen'] + parents(screen_plate_hierarchy)): screen = omero.model.ScreenI() screen.name = rstring('Screen #%i' % screen_index) screen.id = update.saveAndReturnObject(screen).id screens.append(query.get('Screen', screen.id.val)) for plate_index in set(all_inputs['Plate'] + children(screen_plate_hierarchy) + parents(plate_well_hierarchy)): plate = omero.model.PlateI() plate.name = rstring('Plate #%i' % plate_index) plate.id = update.saveAndReturnObject(plate).id plates.append(query.get('Plate', plate.id.val)) for well_index in set(all_inputs['Well'] + children(plate_well_hierarchy) + parents(well_image_hierarchy)): well = omero.model.WellI() wells.append(well) # cannot save until attached to plate for fileset_index in set(all_inputs['Fileset'] + parents(fileset_image_hierarchy)): fileset = omero.model.FilesetI() fileset.templatePrefix = rstring('%s_%i/%s' % (eventContext.userName, eventContext.userId, uuid4())) fileset.id = update.saveAndReturnObject(fileset).id filesets.append(query.get('Fileset', fileset.id.val)) for image_index in set(all_inputs['Image'] + children(dataset_image_hierarchy) + children(well_image_hierarchy) + children(fileset_image_hierarchy)): image = omero.model.ImageI() image.name = rstring('Image #%i' % image_index) image.acquisitionDate = rtime(0L) image.id = update.saveAndReturnObject(image).id images.append(query.get('Image', image.id.val)) # associate test entities for project_index, dataset_indices in project_dataset_hierarchy: for dataset_index in dataset_indices: project_dataset = omero.model.ProjectDatasetLinkI() project_dataset.parent = projects[project_index] project_dataset.child = datasets[dataset_index] update.saveAndReturnObject(project_dataset) for dataset_index, image_indices in dataset_image_hierarchy: for image_index in image_indices: dataset_image = omero.model.DatasetImageLinkI() dataset_image.parent = datasets[dataset_index] dataset_image.child = images[image_index] update.saveAndReturnObject(dataset_image) for screen_index, plate_indices in screen_plate_hierarchy: for plate_index in plate_indices: screen_plate = omero.model.ScreenPlateLinkI() screen_plate.parent = screens[screen_index] screen_plate.child = plates[plate_index] update.saveAndReturnObject(screen_plate) for plate_index, well_indices in plate_well_hierarchy: for well_index in well_indices: wells[well_index].plate = plates[plate_index] for well_index, image_indices in well_image_hierarchy: for image_index in image_indices: well_sample = omero.model.WellSampleI() well_sample.well = wells[well_index] well_sample.image = images[image_index] wells[well_index].addWellSample(well_sample) for well in named_entities['Well']: well.id = update.saveAndReturnObject(well).id for fileset_index, image_indices in fileset_image_hierarchy: for image_index in 
image_indices: images[image_index].fileset = filesets[fileset_index] update.saveAndReturnObject(images[image_index]) # translate list indices into database IDs and check that test cases run as expected for named_indices, fileset_split in test_cases: referenced = {} for name, indices in named_indices.items(): referenced[name] = [ named_entities[name][index].id.val for index in indices ] expected = {} for fileset_index, image_indices in fileset_split.items(): fileset_id = filesets[fileset_index].id.val expected[fileset_id] = {} for included in [False, True]: expected[fileset_id][included] = [ images[image_index].id.val for image_index in image_indices[included] ] if ipojo.getImagesBySplitFilesets(referenced, None) != expected: raise Exception('for referenced ' + str(named_indices) + ' expected ' + str(fileset_split))
gpl-2.0
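The entry above tests OMERO's `getImagesBySplitFilesets`, which reports filesets that a partial selection of images would split. As a rough, server-free sketch of the bookkeeping the test encodes in its `expected` dictionaries (the `fileset_to_images` mapping and `selected` set are hypothetical inputs here, not OMERO API objects):

```python
def split_filesets(fileset_to_images, selected):
    """Return {fileset_id: {True: [included], False: [excluded]}} for every
    fileset that the selection would split, mirroring the structure the test
    above builds as its expected value."""
    selected = set(selected)
    split = {}
    for fileset_id, image_ids in fileset_to_images.items():
        included = [i for i in image_ids if i in selected]
        excluded = [i for i in image_ids if i not in selected]
        if included and excluded:  # only partially-selected filesets count
            split[fileset_id] = {True: included, False: excluded}
    return split


# mirrors the test case ({'Image': [3]}, {2: {True: [3], False: [4, 5]}})
assert split_filesets({0: [0], 1: [1, 2], 2: [3, 4, 5]}, [3]) == \
    {2: {True: [3], False: [4, 5]}}
```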
fbuentello/NBA-Machine-Learning-Tutorial
runNBA_Data.py
1
3729
# runNBA_Data.py
import time
import pandas as pd
import numpy as np

# Machine Learning algorithms
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures, scale
from sklearn.cross_validation import train_test_split, KFold
from sklearn.learning_curve import learning_curve

# Plot modules
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
pd.options.display.max_columns = 50
pd.set_option('expand_frame_repr', False)

# Custom modules
from nbaImport import readMongo, WANTED_FEATURES, PER_FEATURES


def flatten(objToFlatten):
    return [item for sublist in objToFlatten for item in sublist]


def BuildDataSet():
    # 1
    nbaFrame = readMongo(db='YOUR DATABASE', collection='above50Games',
                         query={}, queryReturn=WANTED_FEATURES,
                         no_id=False, mongo_uri='YOUR URI')
    # 2
    statsDF = pd.DataFrame(list(flatten(nbaFrame.Seasons)))
    print(statsDF)

    # 1
    stats = pd.DataFrame(list(statsDF.totals.values))
    stats['FT_M'] = stats['FTA'] - stats['FT']
    stats['FG_M'] = stats['FGA'] - stats['FG']
    stats[PER_FEATURES] = stats[PER_FEATURES].astype(float)

    # 2
    stats['PER'] = pd.DataFrame(list(statsDF.advanced.values))

    # 3
    stats = stats.reindex(np.random.permutation(stats.index))

    X = np.array(stats[PER_FEATURES].values)
    y = (stats["PER"].values.tolist())
    return X, y


def PlotLearningCurve(X_data, y_data, algorithm, s_time):
    print('PlotLearningCurve called')

    # 1
    sizes = np.array([.1, .2, .5, .8, .99])
    train_sizes, train_scores, test_scores = learning_curve(
        algorithm, X_data, y_data, train_sizes=sizes)
    print('after learning_curve')

    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    # 2
    plt.figure(figsize=(15, 10))  # Width, Height

    # Training Set
    plt.fill_between(train_sizes, train_mean - train_std,
                     train_mean + train_std, alpha=0.1, color="r")

    # Cross Validation Set
    plt.fill_between(train_sizes, test_mean - test_std,
                     test_mean + test_std, alpha=0.1, color="g")

    # Graph Legend text
    trainLabel = ('%.3f%% Training score' % (train_mean[4]))
    testLabel = ('%.3f%% Cross-validation score' % (test_mean[4]))

    # Plot lines
    plt.plot(train_sizes, train_mean, 'o-', color="r", label=trainLabel)
    plt.plot(train_sizes, test_mean, 'o-', color="g", label=testLabel)

    # Place title, X-axis label, Y-axis label
    plt.suptitle('Linear Regression: NBA PER', fontsize=20)
    plt.xlabel('Training examples')
    plt.ylabel('Accuracy')

    # Set limit on Y-axis, Place graph legend
    plt.ylim((0.5, 1.1))
    plt.xlim((0, 6500))
    plt.legend(loc="best")

    # Print duration of program
    print("--- %s seconds ---" % (time.time() - s_time))
    plt.show()


def Analysis(_deg=1):
    start_time = time.time()

    # 1
    X, y = BuildDataSet()
    linear_regression = LinearRegression()

    # 2
    polynomial_features = PolynomialFeatures(degree=_deg, include_bias=False)

    # 3
    algorithm = Pipeline([("polynomial_features", polynomial_features),
                          ("linear_regression", linear_regression)])
    #========================================================================== */
    print('after Pipeline')

    # 4
    PlotLearningCurve(X, y, algorithm, start_time)


Analysis(3)
apache-2.0
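The tutorial above wires `PolynomialFeatures` and `LinearRegression` into a `Pipeline` and inspects it with `learning_curve`. A minimal, self-contained sketch of the same pattern on synthetic data, assuming a recent scikit-learn where `learning_curve` lives in `sklearn.model_selection` (the `sklearn.learning_curve` and `sklearn.cross_validation` modules imported above were removed in later releases):

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import learning_curve

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(500, 1))
y = 0.5 * X[:, 0] ** 3 - X[:, 0] + rng.normal(scale=1.0, size=500)  # cubic target

model = Pipeline([
    ("poly", PolynomialFeatures(degree=3, include_bias=False)),
    ("ols", LinearRegression()),
])

sizes, train_scores, test_scores = learning_curve(
    model, X, y, train_sizes=np.array([0.1, 0.2, 0.5, 0.8, 1.0]), cv=5)

print("train R^2 by size:", train_scores.mean(axis=1))
print("cv R^2 by size:   ", test_scores.mean(axis=1))
```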
waterponey/scikit-learn
examples/decomposition/plot_faces_decomposition.py
42
4843
""" ============================ Faces dataset decompositions ============================ This example applies to :ref:`olivetti_faces` different unsupervised matrix decomposition (dimension reduction) methods from the module :py:mod:`sklearn.decomposition` (see the documentation chapter :ref:`decompositions`) . """ print(__doc__) # Authors: Vlad Niculae, Alexandre Gramfort # License: BSD 3 clause import logging from time import time from numpy.random import RandomState import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.cluster import MiniBatchKMeans from sklearn import decomposition # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) rng = RandomState(0) ############################################################################### # Load faces data dataset = fetch_olivetti_faces(shuffle=True, random_state=rng) faces = dataset.data n_samples, n_features = faces.shape # global centering faces_centered = faces - faces.mean(axis=0) # local centering faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) print("Dataset consists of %d faces" % n_samples) ############################################################################### def plot_gallery(title, images, n_col=n_col, n_row=n_row): plt.figure(figsize=(2. * n_col, 2.26 * n_row)) plt.suptitle(title, size=16) for i, comp in enumerate(images): plt.subplot(n_row, n_col, i + 1) vmax = max(comp.max(), -comp.min()) plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray, interpolation='nearest', vmin=-vmax, vmax=vmax) plt.xticks(()) plt.yticks(()) plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.) ############################################################################### # List of the different estimators, whether to center and transpose the # problem, and whether the transformer uses the clustering API. estimators = [ ('Eigenfaces - PCA using randomized SVD', decomposition.PCA(n_components=n_components, svd_solver='randomized', whiten=True), True), ('Non-negative components - NMF', decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3), False), ('Independent components - FastICA', decomposition.FastICA(n_components=n_components, whiten=True), True), ('Sparse comp. - MiniBatchSparsePCA', decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8, n_iter=100, batch_size=3, random_state=rng), True), ('MiniBatchDictionaryLearning', decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1, n_iter=50, batch_size=3, random_state=rng), True), ('Cluster centers - MiniBatchKMeans', MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20, max_iter=50, random_state=rng), True), ('Factor Analysis components - FA', decomposition.FactorAnalysis(n_components=n_components, max_iter=2), True), ] ############################################################################### # Plot a sample of the input data plot_gallery("First centered Olivetti faces", faces_centered[:n_components]) ############################################################################### # Do the estimation and plot it for name, estimator, center in estimators: print("Extracting the top %d %s..." 
% (n_components, name)) t0 = time() data = faces if center: data = faces_centered estimator.fit(data) train_time = (time() - t0) print("done in %0.3fs" % train_time) if hasattr(estimator, 'cluster_centers_'): components_ = estimator.cluster_centers_ else: components_ = estimator.components_ # Plot an image representing the pixelwise variance provided by the # estimator e.g its noise_variance_ attribute. The Eigenfaces estimator, # via the PCA decomposition, also provides a scalar noise_variance_ # (the mean of pixelwise variance) that cannot be displayed as an image # so we skip it. if (hasattr(estimator, 'noise_variance_') and estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case plot_gallery("Pixelwise variance", estimator.noise_variance_.reshape(1, -1), n_col=1, n_row=1) plot_gallery('%s - Train time %.1fs' % (name, train_time), components_[:n_components]) plt.show()
bsd-3-clause
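Each `(name, estimator, center)` triple above is fitted on the face matrix and its learned basis plotted as images. A stripped-down sketch of that loop for a single estimator, run on random data shaped like the Olivetti faces so it does not need to download the dataset:

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
faces = rng.rand(400, 64 * 64)                 # stand-in for fetch_olivetti_faces().data
faces_centered = faces - faces.mean(axis=0)    # global centering, as in the example

pca = PCA(n_components=6, svd_solver='randomized', whiten=True, random_state=rng)
pca.fit(faces_centered)

components = pca.components_                   # shape (6, 4096); one "eigenface" per row
images = components.reshape(-1, 64, 64)        # reshape each component for plotting
print(images.shape, pca.explained_variance_ratio_)
```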
pompiduskus/scikit-learn
sklearn/metrics/metrics.py
232
1262
import warnings

warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
              "0.18. Please import from sklearn.metrics",
              DeprecationWarning)

from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve

from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss

from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
bsd-3-clause
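The module above is purely a deprecation shim: importing it warns and re-exports the metric functions from their new locations. The supported spelling is to import from `sklearn.metrics` directly, for example:

```python
# preferred: import from sklearn.metrics instead of the deprecated sklearn.metrics.metrics
from sklearn.metrics import accuracy_score, roc_auc_score, mean_squared_error

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
print(accuracy_score(y_true, y_pred))  # 0.75
```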
mne-tools/mne-tools.github.io
stable/_downloads/52b26bfb61145291f5108dc7fd05ccee/35_artifact_correction_regression.py
5
9910
# -*- coding: utf-8 -*- """ .. _tut-artifact-regression: =================================== Repairing artifacts with regression =================================== This tutorial covers removal of artifacts using regression as in Gratton et al. (1983) :footcite:`GrattonEtAl1983` and Croft & Barry (2000) :footcite:`CroftBarry2000`. Generally speaking, artifacts that result in time waveforms on the sensors that are accurately reflected by some reference signal can be removed by regression. Blink artifacts captured by bipolar EOG channels provide a good example of this, so we will demonstrate this here. Although ECG signals are well captured by bipolar ECG electrodes, regression-based removal of ECG artifacts usually does not work very well. This is likely because the heart acts like a rotating dipole, and therefore the ECG channel time waveform recorded from the ECG electrode sites does not reflect the same temporal dynamics that manifest at each MEG channel (obtained by sampling some component of the related magnetic vector field). Other approaches like :ref:`ICA <tut-artifact-ica>` or :ref:`SSP <tut-artifact-ssp>` will likely work better for ECG. Furthermore, regression approaches are usually performed in situations where there are few channels available, and removing an entire signal component is undesirable. Hence, most articles on the topic concern EEG and it is unusual to see the technique applied to MEG. For this reason, we will restrict the analysis in this tutorial to EEG data only. Prepare the data ^^^^^^^^^^^^^^^^ We begin as always by importing the necessary Python modules and loading some data. The :ref:`MNE-Sample <sample-dataset>` dataset has some clear, large blink artifacts, especially during the presentation of visual stimuli. """ # %% import numpy as np import mne from mne.preprocessing import EOGRegression data_path = mne.datasets.sample.data_path() raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif' raw = mne.io.read_raw_fif(raw_fname) raw.pick(['eeg', 'eog', 'stim']) raw.load_data() # The regression technique works regardless of chosen reference. However, it is # important to choose a reference before proceeding with the analysis. raw.set_eeg_reference('average') # Removing slow drifts makes for more stable regression coefficients. Make sure # to apply the same filter to both EEG and EOG channels! raw.filter(0.3, 40) # make epochs events = mne.find_events(raw) event_id = {'visual/left': 3, 'visual/right': 4} epochs = mne.Epochs(raw, events, event_id=event_id, preload=True) # %% # Visualize the original data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Let's first look at the `~mne.Evoked` data (average across epochs) without # any corrections applied. # we'll try to keep a consistent ylim across figures plot_kwargs = dict(picks='all', ylim=dict(eeg=(-10, 10), eog=(-5, 15))) # plot the evoked for the EEG and the EOG sensors fig = epochs.average('all').plot(**plot_kwargs) fig.set_size_inches(6, 6) # %% # We can see there is some EOG activity that is likely bleeding into the EEG # evoked response. At around 250ms this becomes especially noticeable. Let's # apply regression to subtract the EOG signal from the EEG signals to clean it # up. # %% # Compute and apply EOG regression # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Now, we'll compare the evoked response before and after we regress out the # EOG signal. First, let's try plain regression, and then we'll explore more # advanced techniques. 
# Perform regression using the EOG sensor as independent variable and the EEG # sensors as dependent variables. model_plain = EOGRegression(picks='eeg', picks_artifact='eog').fit(epochs) fig = model_plain.plot(vlim=(None, 0.4)) # regression coefficients as topomap fig.set_size_inches(3, 2) # %% # The regression coefficients show the linear relationship between each EEG # sensor and the EOG sensor. Note that occipital sensors have a positive # relationship, as we set a common-average reference when we loaded the data # above. # # Now we are ready to use these coefficients to subtract the EOG signal from # the EEG signals. epochs_clean_plain = model_plain.apply(epochs) # After regression, we should redo the baseline correction epochs_clean_plain.apply_baseline() # Show the evoked potential computed on the corrected data fig = epochs_clean_plain.average('all').plot(**plot_kwargs) fig.set_size_inches(6, 6) # %% # Regressing the EOG signal out of the EEG signals has reduced the peak around # 250ms that was partly there because of eye artifacts. # # In the :ref:`MNE-Sample dataset <sample-dataset>`, there are no segments of # data that are particularly unstable, so the basic form of regression produces # robust coefficients. However, this may not be the case in every dataset, so # let's explore some variations that may improve the estimation of the # regression coefficients. # # One potential problem is that the EOG sensor does not only pick up eye # artifacts, but also a bit of EEG signal. This means we are prone to # overestimating the regression coefficients if the EOG sensors are placed too # close to the EEG sensors. However, there is a correction we can apply to # alleviate this. # # Subtract the evoked response from the epoch data before regression # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Gratton et al. (1983) :footcite:`GrattonEtAl1983` suggest computing # regression coefficients on epoch data with the evoked response subtracted # out. The idea is that the EEG signal components relevant to the study are in # the evoked, so by removing them, mostly noise components will be left. Since # EOG artifacts are unlikely to be strictly time-locked to the stimulus onset, # enough EOG information will likely remain to be able to estimate robust # regression coefficients. # create epochs with the evoked subtracted out epochs_sub = epochs.copy().subtract_evoked() # perform regression model_sub = EOGRegression(picks='eeg', picks_artifact='eog').fit(epochs_sub) fig = model_sub.plot(vlim=(None, 0.4)) fig.set_size_inches(3, 2) # apply the regression coefficients to the original epochs epochs_clean_sub = model_plain.apply(epochs).apply_baseline() fig = epochs_clean_sub.average('all').plot(**plot_kwargs) fig.set_size_inches(6, 6) # %% # We see that we obtain the same regression coefficients, even with the evoked # removed from the epochs. # # Create EOG evoked before regression # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # It is advantageous to estimate the regression coefficients on a piece of data # with lots of EOG activity. As EOG activity is typically much larger than EEG, # the EOG artifacts will dominate the signal and the regression coefficients # will reflect mostly the influence of the EOG. To amplify this effect, Croft & # Barry (2000) :footcite:`CroftBarry2000` suggest creating epochs based on # blink onsets and computing the evoked blink response. The averaging procedure # will suppress EEG signals that are not strictly time-locked with the blink # response. 
Ideally, one would create evokeds for both blinks and saccades, and # create two separate regression models. However, we will restrict ourselves to # just blink epochs, since MNE-Python contains an automated method for creating # those. # # .. note:: This is very similar to the approach taken by :ref:`SSP # <tut-artifact-ssp>`. The difference is that :ref:`SSP # <tut-artifact-ssp>` estimates signal components that are maximally # correlated with the artifact and removes any data along that # component (thereby reducing the rank of the non-EOG data), whereas # the regression approach uses the ongoing EOG signal to determine # how much data to remove (thereby not necessarily reducing the rank # of the non-EOG data). Generally, SSP tends to err on the side of # removing too much data, eliminating artifacts and true brain # signals alike, whereas regression will err on the side of not # removing enough, leaving some artifact signals still present in the # signal. eog_epochs = mne.preprocessing.create_eog_epochs(raw) # We need to explicitly specify that we want to average the EOG channel too. eog_evoked = eog_epochs.average('all') eog_evoked.plot('all') fig.set_size_inches(6, 6) # perform regression on the evoked blink response model_evoked = EOGRegression(picks='eeg', picks_artifact='eog').fit(eog_evoked) fig = model_evoked.plot(vlim=(None, 0.4)) fig.set_size_inches(3, 2) # apply the regression coefficients to the original epochs epochs_clean_evoked = model_evoked.apply(epochs).apply_baseline() fig = epochs_clean_evoked.average('all').plot(**plot_kwargs) fig.set_size_inches(6, 6) # for good measure, also show the effect on the blink evoked eog_evoked_clean = model_evoked.apply(eog_evoked) eog_evoked_clean.apply_baseline() eog_evoked_clean.plot('all') fig.set_size_inches(6, 6) # %% # We see that again, the regression weights have been correctly estimated. # # Visualize the effect on raw data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Once we have obtained robust regression weights, we can use them to apply the # regression directly to raw, epoched, and evoked data. Here, we will use the # regression weights obtained from the blink evoked and apply it to an instance # of `~mne.io.Raw`. order = np.concatenate([ # plotting order: EOG first, then EEG mne.pick_types(raw.info, meg=False, eog=True), mne.pick_types(raw.info, meg=False, eeg=True), ]) raw_kwargs = dict(events=eog_epochs.events, order=order, start=13, duration=3, n_channels=10, scalings=dict(eeg=50e-6, eog=250e-6)) # plot original data raw.plot(**raw_kwargs) # regress (using coefficients computed previously) and plot raw_clean = model_evoked.apply(raw) raw_clean.plot(**raw_kwargs) # %% # References # ^^^^^^^^^^ # .. footbibliography::
bsd-3-clause
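Conceptually, `EOGRegression` fits least-squares weights from the EOG channel(s) to each EEG channel and subtracts the fitted artifact contribution. A small NumPy illustration of that idea on synthetic signals (a sketch only; MNE's implementation additionally handles channel picks, baselining, and multiple artifact channels):

```python
import numpy as np

rng = np.random.default_rng(0)
n_times = 10000
eog = rng.normal(size=(1, n_times))                      # artifact reference channel
brain = rng.normal(size=(4, n_times))                    # "true" EEG activity
weights_true = np.array([[0.8], [0.3], [-0.2], [0.05]])  # per-channel blink leakage
eeg = brain + weights_true @ eog                         # contaminated recording

# least-squares regression of EEG on EOG: W = (EEG @ EOG.T) @ inv(EOG @ EOG.T)
w = eeg @ eog.T @ np.linalg.inv(eog @ eog.T)
eeg_clean = eeg - w @ eog

print(np.round(w, 2).ravel())                      # recovers ~[0.8, 0.3, -0.2, 0.05]
print(np.corrcoef(eeg_clean[0], brain[0])[0, 1])   # close to 1 after cleaning
```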
bsipocz/statsmodels
statsmodels/stats/tests/test_diagnostic.py
21
40146
# -*- coding: utf-8 -*- """Tests for Regression Diagnostics and Specification Tests Created on Thu Feb 09 13:19:47 2012 Author: Josef Perktold License: BSD-3 currently all tests are against R """ #import warnings #warnings.simplefilter("default") # ResourceWarning doesn't exist in python 2 #warnings.simplefilter("ignore", ResourceWarning) import os import numpy as np from numpy.testing import (assert_, assert_almost_equal, assert_equal, assert_approx_equal, assert_allclose) from nose import SkipTest from statsmodels.regression.linear_model import OLS, GLSAR from statsmodels.tools.tools import add_constant from statsmodels.datasets import macrodata import statsmodels.stats.sandwich_covariance as sw import statsmodels.stats.diagnostic as smsdia import json #import statsmodels.sandbox.stats.diagnostic as smsdia import statsmodels.stats.outliers_influence as oi cur_dir = os.path.abspath(os.path.dirname(__file__)) def compare_t_est(sp, sp_dict, decimal=(14, 14)): assert_almost_equal(sp[0], sp_dict['statistic'], decimal=decimal[0]) assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=decimal[1]) def notyet_atst(): d = macrodata.load().data realinv = d['realinv'] realgdp = d['realgdp'] realint = d['realint'] endog = realinv exog = add_constant(np.c_[realgdp, realint]) res_ols1 = OLS(endog, exog).fit() #growth rates gs_l_realinv = 400 * np.diff(np.log(d['realinv'])) gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'])) lint = d['realint'][:-1] tbilrate = d['tbilrate'][:-1] endogg = gs_l_realinv exogg = add_constant(np.c_[gs_l_realgdp, lint]) exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate]) res_ols = OLS(endogg, exogg).fit() res_ols2 = OLS(endogg, exogg2).fit() #the following were done accidentally with res_ols1 in R, #with original Greene data params = np.array([-272.3986041341653, 0.1779455206941112, 0.2149432424658157]) cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612, 37.01280466875694, -0.2318836566017614, 4.602339488102263e-05, -0.0104687835998635, 37.012804668757, -0.0104687835998635, 21.16037144168061]).reshape(3,3, order='F') cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015, 54.81079621448568, -0.350751446329901, 6.953380432635583e-05, -0.01268990195095196, 54.81079621448564, -0.01268990195095195, 22.92512402151113]).reshape(3,3, order='F') #goldfeld-quandt het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.246141976112324e-30, distr='f') het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.) 
het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.246141976112324e-30, distr='f') #goldfeld-quandt, fraction = 0.5 het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47, pvalue=2.154956842194898e-33, distr='f') gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5) compare_t_est(gq, het_gq_greater, decimal=(13, 14)) assert_equal(gq[-1], 'increasing') harvey_collier = dict(stat=2.28042114041313, df=199, pvalue=0.02364236161988260, distr='t') #hc = harvtest(fm, order.by=ggdp , data = list()) harvey_collier_2 = dict(stat=0.7516918462158783, df=199, pvalue=0.4531244858006127, distr='t') ################################## class TestDiagnosticG(object): def __init__(self): d = macrodata.load().data #growth rates gs_l_realinv = 400 * np.diff(np.log(d['realinv'])) gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'])) lint = d['realint'][:-1] tbilrate = d['tbilrate'][:-1] endogg = gs_l_realinv exogg = add_constant(np.c_[gs_l_realgdp, lint]) exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate]) exogg3 = add_constant(np.c_[gs_l_realgdp]) res_ols = OLS(endogg, exogg).fit() res_ols2 = OLS(endogg, exogg2).fit() res_ols3 = OLS(endogg, exogg3).fit() self.res = res_ols self.res2 = res_ols2 self.res3 = res_ols3 self.endog = self.res.model.endog self.exog = self.res.model.exog def test_basic(self): #mainly to check I got the right regression #> mkarray(fm$coefficients, "params") params = np.array([-9.48167277465485, 4.3742216647032, -0.613996969478989]) assert_almost_equal(self.res.params, params, decimal=12) def test_hac(self): res = self.res #> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE) #> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE) #> mkarray(nw, "cov_hac_4") cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685, -0.0597207976835705, -0.3133096102522685, 0.1081011690351306, 0.000389440793564336, -0.0597207976835705, 0.000389440793564339, 0.0862118527405036]).reshape(3,3, order='F') #> mkarray(nw2, "cov_hac_10") cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846, -0.03958300024627573, -0.2871560199899845, 0.1049107028987101, 0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961, 0.0985539340694839]).reshape(3,3, order='F') cov = sw.cov_hac_simple(res, nlags=4, use_correction=False) bse_hac = sw.se_cov(cov) assert_almost_equal(cov, cov_hac_4, decimal=14) assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14) cov = sw.cov_hac_simple(res, nlags=10, use_correction=False) bse_hac = sw.se_cov(cov) assert_almost_equal(cov, cov_hac_10, decimal=14) assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14) def test_het_goldfeldquandt(self): #TODO: test options missing #> gq = gqtest(fm, alternative='greater') #> mkhtest_f(gq, 'het_gq_greater', 'f') het_gq_greater = dict(statistic=0.5313259064778423, pvalue=0.9990217851193723, parameters=(98, 98), distr='f') #> gq = gqtest(fm, alternative='less') #> mkhtest_f(gq, 'het_gq_less', 'f') het_gq_less = dict(statistic=0.5313259064778423, pvalue=0.000978214880627621, parameters=(98, 98), distr='f') #> gq = gqtest(fm, alternative='two.sided') #> mkhtest_f(gq, 'het_gq_two_sided', 'f') het_gq_two_sided = dict(statistic=0.5313259064778423, pvalue=0.001956429761255241, parameters=(98, 98), distr='f') #> gq = gqtest(fm, fraction=0.1, alternative='two.sided') #> mkhtest_f(gq, 'het_gq_two_sided_01', 'f') het_gq_two_sided_01 = dict(statistic=0.5006976835928314, pvalue=0.001387126702579789, parameters=(88, 87), distr='f') #> gq = gqtest(fm, fraction=0.5, 
alternative='two.sided') #> mkhtest_f(gq, 'het_gq_two_sided_05', 'f') het_gq_two_sided_05 = dict(statistic=0.434815645134117, pvalue=0.004799321242905568, parameters=(48, 47), distr='f') endogg, exogg = self.endog, self.exog #tests gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5) compare_t_est(gq, het_gq_greater, decimal=(14, 14)) assert_equal(gq[-1], 'increasing') gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5, alternative='decreasing') compare_t_est(gq, het_gq_less, decimal=(14, 14)) assert_equal(gq[-1], 'decreasing') gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5, alternative='two-sided') compare_t_est(gq, het_gq_two_sided, decimal=(14, 14)) assert_equal(gq[-1], 'two-sided') #TODO: forcing the same split as R 202-90-90-1=21 gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21, alternative='two-sided') compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14)) assert_equal(gq[-1], 'two-sided') #TODO other options ??? def test_het_breusch_pagan(self): res = self.res bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347, parameters=(2,), distr='f') bp = smsdia.het_breuschpagan(res.resid, res.model.exog) compare_t_est(bp, bptest, decimal=(12, 12)) def test_het_white(self): res = self.res #TODO: regressiontest, compare with Greene or Gretl or Stata hw = smsdia.het_white(res.resid, res.model.exog) hw_values = (33.503722896538441, 2.9887960597830259e-06, 7.7945101228430946, 1.0354575277704231e-06) assert_almost_equal(hw, hw_values) def test_het_arch(self): #test het_arch and indirectly het_lm against R #> library(FinTS) #> at = ArchTest(residuals(fm), lags=4) #> mkhtest(at, 'archtest_4', 'chi2') archtest_4 = dict(statistic=3.43473400836259, pvalue=0.487871315392619, parameters=(4,), distr='chi2') #> at = ArchTest(residuals(fm), lags=12) #> mkhtest(at, 'archtest_12', 'chi2') archtest_12 = dict(statistic=8.648320999014171, pvalue=0.732638635007718, parameters=(12,), distr='chi2') at4 = smsdia.het_arch(self.res.resid, maxlag=4) at12 = smsdia.het_arch(self.res.resid, maxlag=12) compare_t_est(at4[:2], archtest_4, decimal=(12, 13)) compare_t_est(at12[:2], archtest_12, decimal=(12, 13)) def test_het_arch2(self): #test autolag options, this also test het_lm #unfortunately optimal lag=1 for this data resid = self.res.resid res1 = smsdia.het_arch(resid, maxlag=1, autolag=None, store=True) rs1 = res1[-1] res2 = smsdia.het_arch(resid, maxlag=5, autolag='aic', store=True) rs2 = res2[-1] assert_almost_equal(rs2.resols.params, rs1.resols.params, decimal=13) assert_almost_equal(res2[:4], res1[:4], decimal=13) #test that smallest lag, maxlag=1 works res3 = smsdia.het_arch(resid, maxlag=1, autolag='aic') assert_almost_equal(res3[:4], res1[:4], decimal=13) def test_acorr_breusch_godfrey(self): res = self.res #bgf = bgtest(fm, order = 4, type="F") breuschgodfrey_f = dict(statistic=1.179280833676792, pvalue=0.321197487261203, parameters=(4,195,), distr='f') #> bgc = bgtest(fm, order = 4, type="Chisq") #> mkhtest(bgc, "breuschpagan_c", "chi2") breuschgodfrey_c = dict(statistic=4.771042651230007, pvalue=0.3116067133066697, parameters=(4,), distr='chi2') bg = smsdia.acorr_breusch_godfrey(res, nlags=4) bg_r = [breuschgodfrey_c['statistic'], breuschgodfrey_c['pvalue'], breuschgodfrey_f['statistic'], breuschgodfrey_f['pvalue']] assert_almost_equal(bg, bg_r, decimal=13) # check that lag choice works bg2 = smsdia.acorr_breusch_godfrey(res, nlags=None) bg3 = smsdia.acorr_breusch_godfrey(res, nlags=14) assert_almost_equal(bg2, bg3, decimal=13) def test_acorr_ljung_box(self): 
res = self.res #> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box") #> mkhtest(bt, "ljung_box_4", "chi2") ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713, parameters=(4,), distr='chi2') #> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce") #> mkhtest(bt, "ljung_box_bp_4", "chi2") ljung_box_bp_4 = dict(statistic=5.12462932741681, pvalue=0.2747471266820692, parameters=(4,), distr='chi2') #ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q #> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2) #> mkhtest(bt, "ljung_box_4df2", "chi2") ljung_box_4df2 = dict(statistic=5.23587172795227, pvalue=0.0729532930400377, parameters=(2,), distr='chi2') #> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2) #> mkhtest(bt, "ljung_box_bp_4df2", "chi2") ljung_box_bp_4df2 = dict(statistic=5.12462932741681, pvalue=0.0771260128929921, parameters=(2,), distr='chi2') lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4, boxpierce=True) compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 14)) compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 14)) def test_harvey_collier(self): #> hc = harvtest(fm, order.by = NULL, data = list()) #> mkhtest_f(hc, 'harvey_collier', 't') harvey_collier = dict(statistic=0.494432160939874, pvalue=0.6215491310408242, parameters=(198), distr='t') #> hc2 = harvtest(fm, order.by=ggdp , data = list()) #> mkhtest_f(hc2, 'harvey_collier_2', 't') harvey_collier_2 = dict(statistic=1.42104628340473, pvalue=0.1568762892441689, parameters=(198), distr='t') hc = smsdia.linear_harvey_collier(self.res) compare_t_est(hc, harvey_collier, decimal=(12, 12)) def test_rainbow(self): #rainbow test #> rt = raintest(fm) #> mkhtest_f(rt, 'raintest', 'f') raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418, parameters=(101, 98), distr='f') #> rt = raintest(fm, center=0.4) #> mkhtest_f(rt, 'raintest_center_04', 'f') raintest_center_04 = dict(statistic=0.682635074191527, pvalue=0.971040230422121, parameters=(101, 98), distr='f') #> rt = raintest(fm, fraction=0.4) #> mkhtest_f(rt, 'raintest_fraction_04', 'f') raintest_fraction_04 = dict(statistic=0.565551237772662, pvalue=0.997592305968473, parameters=(122, 77), distr='f') #> rt = raintest(fm, order.by=ggdp) #Warning message: #In if (order.by == "mahalanobis") { : # the condition has length > 1 and only the first element will be used #> mkhtest_f(rt, 'raintest_order_gdp', 'f') raintest_order_gdp = dict(statistic=1.749346160513353, pvalue=0.002896131042494884, parameters=(101, 98), distr='f') rb = smsdia.linear_rainbow(self.res) compare_t_est(rb, raintest, decimal=(13, 14)) rb = smsdia.linear_rainbow(self.res, frac=0.4) compare_t_est(rb, raintest_fraction_04, decimal=(13, 14)) def test_compare_lr(self): res = self.res res3 = self.res3 #nested within res #lrtest #lrt = lrtest(fm, fm2) #Model 1: ginv ~ ggdp + lint #Model 2: ginv ~ ggdp lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184, chi2value=4.66794408358942, pvalue=0.03073069384028677, df=(4,3,1)) lrt = res.compare_lr_test(res3) assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=11) assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=11) waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025, df=(199,200,1)) wt = res.compare_f_test(res3) assert_almost_equal(wt[0], waldtest['fvalue'], decimal=11) assert_almost_equal(wt[1], waldtest['pvalue'], decimal=11) def test_compare_nonnested(self): res = self.res res2 = self.res2 #jt = jtest(fm, lm(ginv ~ ggdp + 
tbilrate)) #Estimate Std. Error t value Pr(>|t|) jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823, 2.155182176352370, 0.032354572525314450, '*'), ('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064, 2.715438978051544, 0.007203854534057954, '**')] jt1 = smsdia.compare_j(res2, res) assert_almost_equal(jt1, jtest[0][3:5], decimal=13) jt2 = smsdia.compare_j(res, res2) assert_almost_equal(jt2, jtest[1][3:5], decimal=14) #Estimate Std. Error z value Pr(>|z|) coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265, -1.304043770977755, 1.922186587840554e-01, ' '), ('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139, -5.727181590258883, 1.021128495098556e-08, '***')] ct1 = smsdia.compare_cox(res, res2) assert_almost_equal(ct1, coxtest[0][3:5], decimal=13) ct2 = smsdia.compare_cox(res2, res) assert_almost_equal(ct2, coxtest[1][3:5], decimal=12) #TODO should be approx # Res.Df Df F Pr(>F) encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983, 0.032354572525313666, '*'), ('M2 vs. ME', 198, -1, 7.373608843521585, 0.007203854534058054, '**')] # Estimate Std. Error t value petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596, 44.5087822087058598, -5.15139, 6.201281252449979e-07), ('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814, 0.0000462387010349, 13.72583, 1.319536115230356e-30)] def test_cusum_ols(self): #R library(strucchange) #> sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM") #> mkhtest(sc, 'cusum_ols', 'BB') cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543, parameters=(), distr='BB') #Brownian Bridge k_vars=3 cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) # compare_t_est(cs_ols, cusum_ols, decimal=(12, 12)) def test_breaks_hansen(self): #> sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen") #> mkhtest(sc, 'breaks_nyblom_hansen', 'BB') breaks_nyblom_hansen = dict(statistic=1.0300792740544484, pvalue=0.1136087530212015, parameters=(), distr='BB') bh = smsdia.breaks_hansen(self.res) assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'], decimal=13) #TODO: breaks_hansen doesn't return pvalues def test_recursive_residuals(self): reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096, -1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999, -4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468, -9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842, -11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159, -9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511, -3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688, -3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032, -4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824, -4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507, -3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 1.773, -0.830, -0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957, -0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728, -0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806, -3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947, 0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624, 5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846, 8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395, 7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218, 7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 
9.056, 10.365, 10.495, 10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522, 10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016, 6.636, 6.975]) rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95) assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front #assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize)) assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3) assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4) #regression number, visually checked with graph from gretl ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734, 13.91080121]) ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937, 39.98515325]) lb, ub = rr[6] assert_almost_equal(ub[:5], ub0, decimal=7) assert_almost_equal(lb[:5], -ub0, decimal=7) assert_almost_equal(ub[-5:], ub1, decimal=7) assert_almost_equal(lb[-5:], -ub1, decimal=7) #test a few values with explicit OLS endog = self.res.model.endog exog = self.res.model.exog params = [] ypred = [] for i in range(3,10): resi = OLS(endog[:i], exog[:i]).fit() ypred.append(resi.model.predict(resi.params, exog[i])) params.append(resi.params) assert_almost_equal(rr[2][3:10], ypred, decimal=12) assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12) assert_almost_equal(rr[1][2:9], params, decimal=12) def test_normality(self): res = self.res #> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test #> lt = lillie.test(residuals(fm)) #> mkhtest(lt, "lilliefors", "-") lilliefors1 = dict(statistic=0.0723390908786589, pvalue=0.01204113540102896, parameters=(), distr='-') #> lt = lillie.test(residuals(fm)**2) #> mkhtest(lt, "lilliefors", "-") lilliefors2 = dict(statistic=0.301311621898024, pvalue=1.004305736618051e-51, parameters=(), distr='-') #> lt = lillie.test(residuals(fm)[1:20]) #> mkhtest(lt, "lilliefors", "-") lilliefors3 = dict(statistic=0.1333956004203103, pvalue=0.4618672180799566, parameters=(), distr='-') lf1 = smsdia.lilliefors(res.resid) lf2 = smsdia.lilliefors(res.resid**2) lf3 = smsdia.lilliefors(res.resid[:20]) compare_t_est(lf1, lilliefors1, decimal=(14, 14)) compare_t_est(lf2, lilliefors2, decimal=(14, 14)) #pvalue very small assert_approx_equal(lf2[1], lilliefors2['pvalue'], significant=10) compare_t_est(lf3, lilliefors3, decimal=(14, 1)) #R uses different approximation for pvalue in last case #> ad = ad.test(residuals(fm)) #> mkhtest(ad, "ad3", "-") adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316, parameters=(), distr='-') #> ad = ad.test(residuals(fm)**2) #> mkhtest(ad, "ad3", "-") adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-') #> ad = ad.test(residuals(fm)[1:20]) #> mkhtest(ad, "ad3", "-") adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933, parameters=(), distr='-') ad1 = smsdia.normal_ad(res.resid) compare_t_est(ad1, adr1, decimal=(11, 13)) ad2 = smsdia.normal_ad(res.resid**2) assert_(np.isinf(ad2[0])) ad3 = smsdia.normal_ad(res.resid[:20]) compare_t_est(ad3, adr3, decimal=(11, 12)) def test_influence(self): res = self.res #this test is slow infl = oi.OLSInfluence(res) fp = open(os.path.join(cur_dir,"results/influence_lsdiag_R.json")) lsdiag = json.load(fp) #basic assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3), res.cov_params(), decimal=14) assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3), res.normalized_cov_params, decimal=14) c0, c1 = infl.cooks_distance #TODO: what's c1 assert_almost_equal(c0, lsdiag['cooks'], 
decimal=14) assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14) assert_almost_equal(infl.resid_studentized_internal, lsdiag['std.res'], decimal=14) #slow: #infl._get_all_obs() #slow, nobs estimation loop, called implicitly dffits, dffth = infl.dffits assert_almost_equal(dffits, lsdiag['dfits'], decimal=14) assert_almost_equal(infl.resid_studentized_external, lsdiag['stud.res'], decimal=14) import pandas fn = os.path.join(cur_dir,"results/influence_measures_R.csv") infl_r = pandas.read_csv(fn, index_col=0) conv = lambda s: 1 if s=='TRUE' else 0 fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv") #not used yet: #infl_bool_r = pandas.read_csv(fn, index_col=0, # converters=dict(zip(lrange(7),[conv]*7))) infl_r2 = np.asarray(infl_r) assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13) assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14) #duplicates assert_almost_equal(dffits, infl_r2[:,3], decimal=14) assert_almost_equal(c0, infl_r2[:,5], decimal=14) assert_almost_equal(infl.hat_matrix_diag, infl_r2[:,6], decimal=14) #Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373 #TODO: finish and check thresholds and pvalues ''' R has >>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0] array([ 6, 26, 63, 76, 90, 199]) >>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0] array([ 4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108, 197, 198]) >>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0] array([ 62, 76, 84, 90, 91, 92, 95, 108, 197, 199]) ''' class TestDiagnosticGPandas(TestDiagnosticG): def __init__(self): d = macrodata.load_pandas().data #growth rates d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff() d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff() d['lint'] = d['realint'].shift(1) d['tbilrate'] = d['tbilrate'].shift(1) d = d.dropna() self.d = d endogg = d['gs_l_realinv'] exogg = add_constant(d[['gs_l_realgdp', 'lint']]) exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']]) exogg3 = add_constant(d[['gs_l_realgdp']]) res_ols = OLS(endogg, exogg).fit() res_ols2 = OLS(endogg, exogg2).fit() res_ols3 = OLS(endogg, exogg3).fit() self.res = res_ols self.res2 = res_ols2 self.res3 = res_ols3 self.endog = self.res.model.endog self.exog = self.res.model.exog def grangertest(): #> gt = grangertest(ginv, ggdp, order=4) #> gt #Granger causality test # #Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4) #Model 2: ggdp ~ Lags(ggdp, 1:4) grangertest = dict(fvalue=1.589672703015157, pvalue=0.178717196987075, df=(198,193)) def test_outlier_influence_funcs(): #smoke test x = add_constant(np.random.randn(10, 2)) y = x.sum(1) + np.random.randn(10) res = OLS(y, x).fit() oi.summary_table(res, alpha=0.05) res2 = OLS(y, x[:,0]).fit() oi.summary_table(res2, alpha=0.05) infl = res2.get_influence() infl.summary_table() def test_influence_wrapped(): from pandas import DataFrame from pandas.util.testing import assert_series_equal d = macrodata.load_pandas().data #growth rates gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna() gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna() lint = d['realint'][:-1] # re-index these because they won't conform to lint gs_l_realgdp.index = lint.index gs_l_realinv.index = lint.index data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp) #order is important exog = DataFrame(data, columns=['const','lrealgdp','lint']) res = OLS(gs_l_realinv, exog).fit() #basic # already tested #assert_almost_equal(lsdiag['cov.scaled'], # res.cov_params().values.ravel(), decimal=14) 
#assert_almost_equal(lsdiag['cov.unscaled'], # res.normalized_cov_params.values.ravel(), decimal=14) infl = oi.OLSInfluence(res) # smoke test just to make sure it works, results separately tested df = infl.summary_frame() assert_(isinstance(df, DataFrame)) #this test is slow fp = open(os.path.join(cur_dir,"results/influence_lsdiag_R.json")) lsdiag = json.load(fp) c0, c1 = infl.cooks_distance #TODO: what's c1, it's pvalues? -ss #NOTE: we get a hard-cored 5 decimals with pandas testing assert_almost_equal(c0, lsdiag['cooks'], 14) assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14) assert_almost_equal(infl.resid_studentized_internal, lsdiag['std.res'], 14) #slow: dffits, dffth = infl.dffits assert_almost_equal(dffits, lsdiag['dfits'], 14) assert_almost_equal(infl.resid_studentized_external, lsdiag['stud.res'], 14) import pandas fn = os.path.join(cur_dir,"results/influence_measures_R.csv") infl_r = pandas.read_csv(fn, index_col=0) conv = lambda s: 1 if s=='TRUE' else 0 fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv") #not used yet: #infl_bool_r = pandas.read_csv(fn, index_col=0, # converters=dict(zip(lrange(7),[conv]*7))) infl_r2 = np.asarray(infl_r) #TODO: finish wrapping this stuff assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13) assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14) def test_influence_dtype(): # see #2148 bug when endog is integer y = np.ones(20) np.random.seed(123) x = np.random.randn(20, 3) res1 = OLS(y, x).fit() res2 = OLS(y*1., x).fit() cr1 = res1.get_influence().cov_ratio cr2 = res2.get_influence().cov_ratio assert_allclose(cr1, cr2, rtol=1e-14) # regression test for values cr3 = np.array( [ 1.22239215, 1.31551021, 1.52671069, 1.05003921, 0.89099323, 1.57405066, 1.03230092, 0.95844196, 1.15531836, 1.21963623, 0.87699564, 1.16707748, 1.10481391, 0.98839447, 1.08999334, 1.35680102, 1.46227715, 1.45966708, 1.13659521, 1.22799038]) assert_almost_equal(cr1, cr3, decimal=8) def test_outlier_test(): # results from R with NA -> 1. 
Just testing interface here because # outlier_test is just a wrapper labels = ['accountant', 'pilot', 'architect', 'author', 'chemist', 'minister', 'professor', 'dentist', 'reporter', 'engineer', 'undertaker', 'lawyer', 'physician', 'welfare.worker', 'teacher', 'conductor', 'contractor', 'factory.owner', 'store.manager', 'banker', 'bookkeeper', 'mail.carrier', 'insurance.agent', 'store.clerk', 'carpenter', 'electrician', 'RR.engineer', 'machinist', 'auto.repairman', 'plumber', 'gas.stn.attendant', 'coal.miner', 'streetcar.motorman', 'taxi.driver', 'truck.driver', 'machine.operator', 'barber', 'bartender', 'shoe.shiner', 'cook', 'soda.clerk', 'watchman', 'janitor', 'policeman', 'waiter'] #Duncan's prestige data from car exog = [[1.0, 62.0, 86.0], [1.0, 72.0, 76.0], [1.0, 75.0, 92.0], [1.0, 55.0, 90.0], [1.0, 64.0, 86.0], [1.0, 21.0, 84.0], [1.0, 64.0, 93.0], [1.0, 80.0, 100.0], [1.0, 67.0, 87.0], [1.0, 72.0, 86.0], [1.0, 42.0, 74.0], [1.0, 76.0, 98.0], [1.0, 76.0, 97.0], [1.0, 41.0, 84.0], [1.0, 48.0, 91.0], [1.0, 76.0, 34.0], [1.0, 53.0, 45.0], [1.0, 60.0, 56.0], [1.0, 42.0, 44.0], [1.0, 78.0, 82.0], [1.0, 29.0, 72.0], [1.0, 48.0, 55.0], [1.0, 55.0, 71.0], [1.0, 29.0, 50.0], [1.0, 21.0, 23.0], [1.0, 47.0, 39.0], [1.0, 81.0, 28.0], [1.0, 36.0, 32.0], [1.0, 22.0, 22.0], [1.0, 44.0, 25.0], [1.0, 15.0, 29.0], [1.0, 7.0, 7.0], [1.0, 42.0, 26.0], [1.0, 9.0, 19.0], [1.0, 21.0, 15.0], [1.0, 21.0, 20.0], [1.0, 16.0, 26.0], [1.0, 16.0, 28.0], [1.0, 9.0, 17.0], [1.0, 14.0, 22.0], [1.0, 12.0, 30.0], [1.0, 17.0, 25.0], [1.0, 7.0, 20.0], [1.0, 34.0, 47.0], [1.0, 8.0, 32.0]] endog = [ 82., 83., 90., 76., 90., 87., 93., 90., 52., 88., 57., 89., 97., 59., 73., 38., 76., 81., 45., 92., 39., 34., 41., 16., 33., 53., 67., 57., 26., 29., 10., 15., 19., 10., 13., 24., 20., 7., 3., 16., 6., 11., 8., 41., 10.] 
ndarray_mod = OLS(endog, exog).fit() rstudent = [3.1345185839, -2.3970223990, 2.0438046359, -1.9309187757, 1.8870465798, -1.7604905300, -1.7040324156, 1.6024285876, -1.4332485037, -1.1044851583, 1.0688582315, 1.0185271840, -0.9024219332, -0.9023876471, -0.8830953936, 0.8265782334, 0.8089220547, 0.7682770197, 0.7319491074, -0.6665962829, 0.5227352794, -0.5135016547, 0.5083881518, 0.4999224372, -0.4980818221, -0.4759717075, -0.4293565820, -0.4114056499, -0.3779540862, 0.3556874030, 0.3409200462, 0.3062248646, 0.3038999429, -0.3030815773, -0.1873387893, 0.1738050251, 0.1424246593, -0.1292266025, 0.1272066463, -0.0798902878, 0.0788467222, 0.0722556991, 0.0505098280, 0.0233215136, 0.0007112055] unadj_p = [0.003177202, 0.021170298, 0.047432955, 0.060427645, 0.066248120, 0.085783008, 0.095943909, 0.116738318, 0.159368890, 0.275822623, 0.291386358, 0.314400295, 0.372104049, 0.372122040, 0.382333561, 0.413260793, 0.423229432, 0.446725370, 0.468363101, 0.508764039, 0.603971990, 0.610356737, 0.613905871, 0.619802317, 0.621087703, 0.636621083, 0.669911674, 0.682917818, 0.707414459, 0.723898263, 0.734904667, 0.760983108, 0.762741124, 0.763360242, 0.852319039, 0.862874018, 0.887442197, 0.897810225, 0.899398691, 0.936713197, 0.937538115, 0.942749758, 0.959961394, 0.981506948, 0.999435989] bonf_p = [0.1429741, 0.9526634, 2.1344830, 2.7192440, 2.9811654, 3.8602354, 4.3174759, 5.2532243, 7.1716001, 12.4120180, 13.1123861, 14.1480133, 16.7446822, 16.7454918, 17.2050103, 18.5967357, 19.0453245, 20.1026416, 21.0763395, 22.8943818, 27.1787396, 27.4660532, 27.6257642, 27.8911043, 27.9489466, 28.6479487, 30.1460253, 30.7313018, 31.8336506, 32.5754218, 33.0707100, 34.2442399, 34.3233506, 34.3512109, 38.3543568, 38.8293308, 39.9348989, 40.4014601, 40.4729411, 42.1520939, 42.1892152, 42.4237391, 43.1982627, 44.1678127, 44.9746195] bonf_p = np.array(bonf_p) bonf_p[bonf_p > 1] = 1 sorted_labels = ["minister", "reporter", "contractor", "insurance.agent", "machinist", "store.clerk", "conductor", "factory.owner", "mail.carrier", "streetcar.motorman", "carpenter", "coal.miner", "bartender", "bookkeeper", "soda.clerk", "chemist", "RR.engineer", "professor", "electrician", "gas.stn.attendant", "auto.repairman", "watchman", "banker", "machine.operator", "dentist", "waiter", "shoe.shiner", "welfare.worker", "plumber", "physician", "pilot", "engineer", "accountant", "lawyer", "undertaker", "barber", "store.manager", "truck.driver", "cook", "janitor", "policeman", "architect", "teacher", "taxi.driver", "author"] res2 = np.c_[rstudent, unadj_p, bonf_p] res = oi.outlier_test(ndarray_mod, method='b', labels=labels, order=True) np.testing.assert_almost_equal(res.values, res2, 7) np.testing.assert_equal(res.index.tolist(), sorted_labels) # pylint: disable-msg=E1103 if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False) #t = TestDiagnosticG() #t.test_basic() #t.test_hac() #t.test_acorr_breusch_godfrey() #t.test_acorr_ljung_box() #t.test_het_goldfeldquandt() #t.test_het_breusch_pagan() #t.test_het_white() #t.test_compare_lr() #t.test_compare_nonnested() #t.test_influence() ################################################## ''' J test Model 1: ginv ~ ggdp + lint Model 2: ginv ~ ggdp + tbilrate Estimate Std. Error t value Pr(>|t|) M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 * M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 ** --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 = lm(ginv ~ ggdp + tbilrate) > ct = coxtest(fm, fm3) > ct Cox test Model 1: ginv ~ ggdp + lint Model 2: ginv ~ ggdp + tbilrate Estimate Std. Error z value Pr(>|z|) fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222 fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > et = encomptest(fm, fm3) > et Encompassing test Model 1: ginv ~ ggdp + lint Model 2: ginv ~ ggdp + tbilrate Model E: ginv ~ ggdp + lint + tbilrate Res.Df Df F Pr(>F) M1 vs. ME 198 -1 4.64481 0.0323546 * M2 vs. ME 198 -1 7.37361 0.0072039 ** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 > fm4 = lm(realinv ~ realgdp + realint, data=d) > fm5 = lm(log(realinv) ~ realgdp + realint, data=d) > pet = petest(fm4, fm5) > pet PE test Model 1: realinv ~ realgdp + realint Model 2: log(realinv) ~ realgdp + realint Estimate Std. Error t value M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139 M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583 Pr(>|t|) M1 + log(fit(M1))-fit(M2) 6.2013e-07 *** M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 *** --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 '''
bsd-3-clause
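The test class above checks `statsmodels.stats.diagnostic` against reference values computed in R. A minimal sketch of calling two of those diagnostics on a fresh OLS fit with synthetic data (`acorr_ljungbox` returns a DataFrame in recent statsmodels and a tuple of arrays in older releases, so only its printed form is shown):

```python
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.diagnostic import het_breuschpagan, acorr_ljungbox

rng = np.random.RandomState(0)
x = rng.randn(200, 2)
X = sm.add_constant(x)
y = X @ np.array([1.0, 2.0, -1.0]) + rng.randn(200)

res = sm.OLS(y, X).fit()

# Breusch-Pagan heteroskedasticity test: (LM stat, LM p-value, F stat, F p-value)
lm, lm_p, f, f_p = het_breuschpagan(res.resid, res.model.exog)
print("Breusch-Pagan p-value:", lm_p)

# Ljung-Box autocorrelation test on the residuals
print(acorr_ljungbox(res.resid, lags=[4]))
```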
chrisndodge/edx-platform
lms/djangoapps/course_blocks/transformers/tests/test_hidden_content.py
17
2597
""" Tests for HiddenContentTransformer. """ from datetime import timedelta import ddt from django.utils.timezone import now from nose.plugins.attrib import attr from ..hidden_content import HiddenContentTransformer from .helpers import BlockParentsMapTestCase, update_block @attr(shard=3) @ddt.ddt class HiddenContentTransformerTestCase(BlockParentsMapTestCase): """ VisibilityTransformer Test """ TRANSFORMER_CLASS_TO_TEST = HiddenContentTransformer ALL_BLOCKS = {0, 1, 2, 3, 4, 5, 6} class DueDateType(object): """ Use constant enum types for deterministic ddt test method names (rather than dynamically generated timestamps) """ none = 1, future = 2, past = 3 TODAY = now() PAST_DATE = TODAY - timedelta(days=30) FUTURE_DATE = TODAY + timedelta(days=30) @classmethod def due(cls, enum_value): """ Returns a start date for the given enum value """ if enum_value == cls.future: return cls.FUTURE_DATE elif enum_value == cls.past: return cls.PAST_DATE else: return None # Following test cases are based on BlockParentsMapTestCase.parents_map @ddt.data( ({}, ALL_BLOCKS), ({0: DueDateType.none}, ALL_BLOCKS), ({0: DueDateType.future}, ALL_BLOCKS), ({1: DueDateType.none}, ALL_BLOCKS), ({1: DueDateType.future}, ALL_BLOCKS), ({4: DueDateType.none}, ALL_BLOCKS), ({4: DueDateType.future}, ALL_BLOCKS), ({0: DueDateType.past}, {}), ({1: DueDateType.past}, ALL_BLOCKS - {1, 3, 4}), ({2: DueDateType.past}, ALL_BLOCKS - {2, 5}), ({4: DueDateType.past}, ALL_BLOCKS - {4}), ({1: DueDateType.past, 2: DueDateType.past}, {0}), ({1: DueDateType.none, 2: DueDateType.past}, ALL_BLOCKS - {2, 5}), ({1: DueDateType.past, 2: DueDateType.none}, ALL_BLOCKS - {1, 3, 4}), ) @ddt.unpack def test_hidden_content( self, hide_due_values, expected_visible_blocks, ): for idx, due_date_type in hide_due_values.iteritems(): block = self.get_block(idx) block.due = self.DueDateType.due(due_date_type) block.hide_after_due = True update_block(block) self.assert_transform_results( self.student, expected_visible_blocks, blocks_with_differing_access=None, transformers=self.transformers, )
agpl-3.0
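The test above leans on the `ddt` package to expand a single test method into one generated test per data tuple. A tiny standalone sketch of that `@ddt.data`/`@ddt.unpack` pattern with plain `unittest` (the `is_hidden` rule is a hypothetical stand-in, not the transformer's actual logic):

```python
import unittest
import ddt


@ddt.ddt
class HideAfterDueRuleTest(unittest.TestCase):
    """Each @ddt.data tuple becomes its own generated test method."""

    @staticmethod
    def is_hidden(hide_after_due, due_passed):
        # hypothetical rule: hide only when hiding is requested and the due
        # date has passed
        return hide_after_due and due_passed

    @ddt.data(
        (False, False, False),
        (False, True, False),
        (True, False, False),
        (True, True, True),
    )
    @ddt.unpack
    def test_rule(self, hide_after_due, due_passed, expected):
        self.assertEqual(self.is_hidden(hide_after_due, due_passed), expected)


if __name__ == "__main__":
    unittest.main()
```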
cauchycui/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
302
4121
""" Testing for the nearest centroid module. """ import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from numpy.testing import assert_equal from sklearn.neighbors import NearestCentroid from sklearn import datasets from sklearn.metrics.pairwise import pairwise_distances # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] X_csr = sp.csr_matrix(X) # Sparse matrix y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] T_csr = sp.csr_matrix(T) true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_classification_toy(): # Check classification on a toy dataset, including sparse versions. clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) # Same test, but with a sparse matrix to fit and test. clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit with sparse, test with non-sparse clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T), true_result) # Fit with non-sparse, test with sparse clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit and predict with non-CSR sparse matrices clf = NearestCentroid() clf.fit(X_csr.tocoo(), y) assert_array_equal(clf.predict(T_csr.tolil()), true_result) def test_precomputed(): clf = NearestCentroid(metric="precomputed") clf.fit(X, y) S = pairwise_distances(T, clf.centroids_) assert_array_equal(clf.predict(S), true_result) def test_iris(): # Check consistency on dataset iris. for metric in ('euclidean', 'cosine'): clf = NearestCentroid(metric=metric).fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.9, "Failed with score = " + str(score) def test_iris_shrinkage(): # Check consistency on dataset iris, when using shrinkage. for metric in ('euclidean', 'cosine'): for shrink_threshold in [None, 0.1, 0.5]: clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold) clf = clf.fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.8, "Failed with score = " + str(score) def test_pickle(): import pickle # classification obj = NearestCentroid() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_array_equal(score, score2, "Failed to generate same score" " after pickling (classification).") def test_shrinkage_threshold_decoded_y(): clf = NearestCentroid(shrink_threshold=0.01) y_ind = np.asarray(y) y_ind[y_ind == -1] = 0 clf.fit(X, y_ind) centroid_encoded = clf.centroids_ clf.fit(X, y) assert_array_equal(centroid_encoded, clf.centroids_) def test_predict_translated_data(): # Test that NearestCentroid gives same results on translated data rng = np.random.RandomState(0) X = rng.rand(50, 50) y = rng.randint(0, 3, 50) noise = rng.rand(50) clf = NearestCentroid(shrink_threshold=0.1) clf.fit(X, y) y_init = clf.predict(X) clf = NearestCentroid(shrink_threshold=0.1) X_noise = X + noise clf.fit(X_noise, y) y_translate = clf.predict(X_noise) assert_array_equal(y_init, y_translate) def test_manhattan_metric(): # Test the manhattan metric. 
clf = NearestCentroid(metric='manhattan') clf.fit(X, y) dense_centroid = clf.centroids_ clf.fit(X_csr, y) assert_array_equal(clf.centroids_, dense_centroid) assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
bsd-3-clause
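A short usage sketch of the estimator exercised by the tests above, on the same toy data; it assumes only NumPy, SciPy and scikit-learn, and the expected outputs in the comments follow from the per-class centroids.

import numpy as np
from scipy import sparse as sp
from sklearn.neighbors import NearestCentroid

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([-1, -1, -1, 1, 1, 1])

# Dense fit and predict: each query point gets the label of the closest centroid.
clf = NearestCentroid().fit(X, y)
print(clf.centroids_)                    # one row per class: approx. [-1.33, -1.33] and [1.33, 1.33]
print(clf.predict([[-1, -1], [2, 2]]))   # [-1  1]

# The tests above also cover sparse input; CSR matrices work the same way.
clf_sparse = NearestCentroid().fit(sp.csr_matrix(X), y)
print(clf_sparse.predict(sp.csr_matrix([[3, 2]])))   # [1]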
AlexRobson/scikit-learn
sklearn/qda.py
139
7682
""" Quadratic Discriminant Analysis """ # Author: Matthieu Perrot <matthieu.perrot@gmail.com> # # License: BSD 3 clause import warnings import numpy as np from .base import BaseEstimator, ClassifierMixin from .externals.six.moves import xrange from .utils import check_array, check_X_y from .utils.validation import check_is_fitted from .utils.fixes import bincount __all__ = ['QDA'] class QDA(BaseEstimator, ClassifierMixin): """ Quadratic Discriminant Analysis (QDA) A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class. Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- priors : array, optional, shape = [n_classes] Priors on classes reg_param : float, optional Regularizes the covariance estimate as ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)`` Attributes ---------- covariances_ : list of array-like, shape = [n_features, n_features] Covariance matrices of each class. means_ : array-like, shape = [n_classes, n_features] Class means. priors_ : array-like, shape = [n_classes] Class priors (sum to 1). rotations_ : list of arrays For each class k an array of shape [n_features, n_k], with ``n_k = min(n_features, number of elements in class k)`` It is the rotation of the Gaussian distribution, i.e. its principal axis. scalings_ : list of arrays For each class k an array of shape [n_k]. It contains the scaling of the Gaussian distributions along its principal axes, i.e. the variance in the rotated coordinate system. Examples -------- >>> from sklearn.qda import QDA >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = QDA() >>> clf.fit(X, y) QDA(priors=None, reg_param=0.0) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.lda.LDA: Linear discriminant analysis """ def __init__(self, priors=None, reg_param=0.): self.priors = np.asarray(priors) if priors is not None else None self.reg_param = reg_param def fit(self, X, y, store_covariances=False, tol=1.0e-4): """ Fit the QDA model according to the given training data and parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values (integers) store_covariances : boolean If True the covariance matrices are computed and stored in the `self.covariances_` attribute. tol : float, optional, default 1.0e-4 Threshold used for rank estimation. """ X, y = check_X_y(X, y) self.classes_, y = np.unique(y, return_inverse=True) n_samples, n_features = X.shape n_classes = len(self.classes_) if n_classes < 2: raise ValueError('y has less than 2 classes') if self.priors is None: self.priors_ = bincount(y) / float(n_samples) else: self.priors_ = self.priors cov = None if store_covariances: cov = [] means = [] scalings = [] rotations = [] for ind in xrange(n_classes): Xg = X[y == ind, :] meang = Xg.mean(0) means.append(meang) if len(Xg) == 1: raise ValueError('y has only 1 sample in class %s, covariance ' 'is ill defined.' 
% str(self.classes_[ind])) Xgc = Xg - meang # Xgc = U * S * V.T U, S, Vt = np.linalg.svd(Xgc, full_matrices=False) rank = np.sum(S > tol) if rank < n_features: warnings.warn("Variables are collinear") S2 = (S ** 2) / (len(Xg) - 1) S2 = ((1 - self.reg_param) * S2) + self.reg_param if store_covariances: # cov = V * (S^2 / (n-1)) * V.T cov.append(np.dot(S2 * Vt.T, Vt)) scalings.append(S2) rotations.append(Vt.T) if store_covariances: self.covariances_ = cov self.means_ = np.asarray(means) self.scalings_ = scalings self.rotations_ = rotations return self def _decision_function(self, X): check_is_fitted(self, 'classes_') X = check_array(X) norm2 = [] for i in range(len(self.classes_)): R = self.rotations_[i] S = self.scalings_[i] Xm = X - self.means_[i] X2 = np.dot(Xm, R * (S ** (-0.5))) norm2.append(np.sum(X2 ** 2, 1)) norm2 = np.array(norm2).T # shape = [len(X), n_classes] u = np.asarray([np.sum(np.log(s)) for s in self.scalings_]) return (-0.5 * (norm2 + u) + np.log(self.priors_)) def decision_function(self, X): """Apply decision function to an array of samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples (test vectors). Returns ------- C : array, shape = [n_samples, n_classes] or [n_samples,] Decision function values related to each class, per sample. In the two-class case, the shape is [n_samples,], giving the log likelihood ratio of the positive class. """ dec_func = self._decision_function(X) # handle special case of two classes if len(self.classes_) == 2: return dec_func[:, 1] - dec_func[:, 0] return dec_func def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] """ d = self._decision_function(X) y_pred = self.classes_.take(d.argmax(1)) return y_pred def predict_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior probabilities of classification per class. """ values = self._decision_function(X) # compute the likelihood of the underlying gaussian models # up to a multiplicative constant. likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis]) # compute posterior probabilities return likelihood / likelihood.sum(axis=1)[:, np.newaxis] def predict_log_proba(self, X): """Return posterior probabilities of classification. Parameters ---------- X : array-like, shape = [n_samples, n_features] Array of samples/test vectors. Returns ------- C : array, shape = [n_samples, n_classes] Posterior log-probabilities of classification per class. """ # XXX : can do better to avoid precision overflows probas_ = self.predict_proba(X) return np.log(probas_)
bsd-3-clause
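Beyond the docstring example, the fit signature above also accepts store_covariances and tol, and the class exposes decision_function and predict_proba. A brief sketch using the module as shown; note that the import assumes the old layout (later scikit-learn releases moved this class to sklearn.discriminant_analysis as QuadraticDiscriminantAnalysis).

import numpy as np
from sklearn.qda import QDA  # old module layout, matching the file above

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])

# reg_param shrinks each class covariance toward the identity, as documented above.
clf = QDA(reg_param=0.1)
clf.fit(X, y, store_covariances=True)

x_new = [[-0.8, -1]]
print(clf.predict(x_new))              # hard class label
print(clf.predict_proba(x_new))        # posterior probability per class
print(clf.decision_function(x_new))    # two-class case: log likelihood ratio
print(len(clf.covariances_))           # one covariance matrix per class (2 here)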
smartscheduling/scikit-learn-categorical-tree
sklearn/manifold/t_sne.py
6
19987
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de> # License: BSD 3 clause (C) 2014 # This is the standard t-SNE implementation. There are faster modifications of # the algorithm: # * Barnes-Hut-SNE: reduces the complexity of the gradient computation from # N^2 to N log N (http://arxiv.org/abs/1301.3342) # * Fast Optimization for t-SNE: # http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf import numpy as np from scipy import linalg from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from ..base import BaseEstimator from ..utils import check_array from ..utils import check_random_state from ..utils.extmath import _ravel from ..decomposition import RandomizedPCA from ..metrics.pairwise import pairwise_distances from . import _utils MACHINE_EPSILON = np.finfo(np.double).eps def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity conditional_P = _utils._binary_search_perplexity( distances, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P def _kl_divergence(params, P, alpha, n_samples, n_components): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. alpha : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution n = pdist(X_embedded, "sqeuclidean") n += 1. n /= alpha n **= (alpha + 1.0) / -2.0 Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(P / Q)) # Gradient: dC/dY grad = np.ndarray((n_samples, n_components)) PQd = squareform((P - Q) * n) for i in range(n_samples): np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i]) grad = grad.ravel() c = 2.0 * (alpha + 1.0) / alpha grad *= c return kl_divergence, grad def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30, momentum=0.5, learning_rate=1000.0, min_gain=0.01, min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0, args=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. 
p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_without_progress : int, optional (default: 30) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.5) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 1000.0) The learning rate should be extremely high for t-SNE! Values in the range [100.0, 1000.0] are common. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. min_error_diff : float, optional (default: 1e-7) If the absolute difference of two successive cost function values is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """ if args is None: args = [] p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = 0 for i in range(it, n_iter): new_error, grad = objective(p, *args) error_diff = np.abs(new_error - error) error = new_error grad_norm = linalg.norm(grad) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if min_grad_norm >= grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break if min_error_diff >= error_diff: if verbose >= 2: print("[t-SNE] Iteration %d: error difference %f. Finished." % (i + 1, error_diff)) break inc = update * grad >= 0.0 dec = np.invert(inc) gains[inc] += 0.05 gains[dec] *= 0.95 np.clip(gains, min_gain, np.inf) grad *= gains update = momentum * update - learning_rate * grad p += update if verbose >= 2 and (i + 1) % 10 == 0: print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f" % (i + 1, error, grad_norm)) return p, error, i def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i (r(i, j) - k)} where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. 
n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. """ if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t class TSNE(BaseEstimator): """t-distributed Stochastic Neighbor Embedding. t-SNE [1] is a tool to visualize high-dimensional data. It converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results. It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of features is very high. This will suppress some noise and speed up the computation of pairwise distances between samples. For more tips see Laurens van der Maaten's FAQ [2]. Parameters ---------- n_components : int, optional (default: 2) Dimension of the embedded space. perplexity : float, optional (default: 30) The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selcting a value between 5 and 50. The choice is not extremely critical since t-SNE is quite insensitive to this parameter. early_exaggeration : float, optional (default: 4.0) Controls how tight natural clusters in the original space are in the embedded space and how much space will be between them. For larger values, the space between natural clusters will be larger in the embedded space. Again, the choice of this parameter is not very critical. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. learning_rate : float, optional (default: 1000) The learning rate can be a critical parameter. It should be between 100 and 1000. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. If the cost function gets stuck in a bad local minimum increasing the learning rate helps sometimes. n_iter : int, optional (default: 1000) Maximum number of iterations for the optimization. Should be at least 200. metric : string or callable, optional The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. 
Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. The default is "euclidean" which is interpreted as squared euclidean distance. init : string, optional (default: "random") Initialization of embedding. Possible options are 'random' and 'pca'. PCA initialization cannot be used with precomputed distances and is usually more globally stable than random initialization. verbose : int, optional (default: 0) Verbosity level. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Note that different initializations might result in different local minima of the cost function. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. training_data_ : array-like, shape (n_samples, n_features) Stores the training data. Examples -------- >>> import numpy as np >>> from sklearn.manifold import TSNE >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = TSNE(n_components=2, random_state=0) >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([[ 887.28..., 238.61...], [ -714.79..., 3243.34...], [ 957.30..., -2505.78...], [-1130.28..., -974.78...]) References ---------- [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html """ def __init__(self, n_components=2, perplexity=30.0, early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000, metric="euclidean", init="random", verbose=0, random_state=None): if init not in ["pca", "random"]: raise ValueError("'init' must be either 'pca' or 'random'") self.n_components = n_components self.perplexity = perplexity self.early_exaggeration = early_exaggeration self.learning_rate = learning_rate self.n_iter = n_iter self.metric = metric self.init = init self.verbose = verbose self.random_state = random_state def fit(self, X, y=None): """Fit the model using X as training data. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is " "%f" % self.early_exaggeration) if self.n_iter < 200: raise ValueError("n_iter should be at least 200") if self.metric == "precomputed": if self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be used " "with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) # Degrees of freedom of the Student's t-distribution. The suggestion # alpha = n_components - 1 comes from "Learning a Parametric Embedding # by Preserving Local Structure" Laurens van der Maaten, 2009. 
alpha = max(self.n_components - 1.0, 1) n_samples = X.shape[0] self.training_data_ = X P = _joint_probabilities(distances, self.perplexity, self.verbose) if self.init == 'pca': pca = RandomizedPCA(n_components=self.n_components, random_state=random_state) X_embedded = pca.fit_transform(X) elif self.init == 'random': X_embedded = None else: raise ValueError("Unsupported initialization scheme: %s" % self.init) self.embedding_ = self._tsne(P, alpha, n_samples, random_state, X_embedded=X_embedded) def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with three stages: # * early exaggeration with momentum 0.5 # * early exaggeration with momentum 0.8 # * final optimization with momentum 0.8 # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. if X_embedded is None: # Initialize embedding randomly X_embedded = 1e-4 * random_state.randn(n_samples, self.n_components) params = X_embedded.ravel() # Early exaggeration P *= self.early_exaggeration params, error, it = _gradient_descent( _kl_divergence, params, it=0, n_iter=50, momentum=0.5, min_grad_norm=0.0, min_error_diff=0.0, learning_rate=self.learning_rate, verbose=self.verbose, args=[P, alpha, n_samples, self.n_components]) params, error, it = _gradient_descent( _kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8, min_grad_norm=0.0, min_error_diff=0.0, learning_rate=self.learning_rate, verbose=self.verbose, args=[P, alpha, n_samples, self.n_components]) if self.verbose: print("[t-SNE] Error after %d iterations with early " "exaggeration: %f" % (it + 1, error)) # Final optimization P /= self.early_exaggeration params, error, it = _gradient_descent( _kl_divergence, params, it=it + 1, n_iter=self.n_iter, momentum=0.8, learning_rate=self.learning_rate, verbose=self.verbose, args=[P, alpha, n_samples, self.n_components]) if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, error)) X_embedded = params.reshape(n_samples, self.n_components) return X_embedded def fit_transform(self, X, y=None): """Transform X to the embedded space. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ self.fit(X) return self.embedding_
bsd-3-clause
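A compact end-to-end sketch of the module above: embed a small random dataset and score the result with the trustworthiness helper defined in the same file. The import path follows the file shown here (sklearn/manifold/t_sne.py); only NumPy and scikit-learn are assumed, and the random data is purely illustrative.

import numpy as np
from sklearn.manifold import TSNE
from sklearn.manifold.t_sne import trustworthiness  # helper defined in the module above

rng = np.random.RandomState(0)
X = rng.rand(60, 10)          # 60 samples, 10 features (toy data)

# PCA initialisation is usually more globally stable than random, per the docstring above.
model = TSNE(n_components=2, init='pca', random_state=0)
X_embedded = model.fit_transform(X)

print(X_embedded.shape)                                   # (60, 2)
# Fraction of 5-nearest-neighbour structure preserved by the embedding (1.0 = perfect).
print(trustworthiness(X, X_embedded, n_neighbors=5))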
smartscheduling/scikit-learn-categorical-tree
sklearn/utils/class_weight.py
20
6468
# Authors: Andreas Mueller # Manoj Kumar # License: BSD 3 clause import numpy as np from ..externals import six from ..utils.fixes import in1d from .fixes import bincount def compute_class_weight(class_weight, classes, y): """Estimate class weights for unbalanced datasets. Parameters ---------- class_weight : dict, 'auto' or None If 'auto', class weights will be given inverse proportional to the frequency of the class in the data. If a dictionary is given, keys are classes and values are corresponding class weights. If None is given, the class weights will be uniform. classes : ndarray Array of the classes occurring in the data, as given by ``np.unique(y_org)`` with ``y_org`` the original class labels. y : array-like, shape (n_samples,) Array of original class labels per sample; Returns ------- class_weight_vect : ndarray, shape (n_classes,) Array with class_weight_vect[i] the weight for i-th class """ # Import error caused by circular imports. from ..preprocessing import LabelEncoder if class_weight is None or len(class_weight) == 0: # uniform class weights weight = np.ones(classes.shape[0], dtype=np.float64, order='C') elif class_weight == 'auto': # Find the weight of each class as present in y. le = LabelEncoder() y_ind = le.fit_transform(y) if not all(np.in1d(classes, le.classes_)): raise ValueError("classes should have valid labels that are in y") # inversely proportional to the number of samples in the class recip_freq = 1. / bincount(y_ind) weight = recip_freq[le.transform(classes)] / np.mean(recip_freq) else: # user-defined dictionary weight = np.ones(classes.shape[0], dtype=np.float64, order='C') if not isinstance(class_weight, dict): raise ValueError("class_weight must be dict, 'auto', or None," " got: %r" % class_weight) for c in class_weight: i = np.searchsorted(classes, c) if classes[i] != c: raise ValueError("Class label %d not present." % c) else: weight[i] = class_weight[c] return weight def compute_sample_weight(class_weight, y, indices=None): """Estimate sample weights by class for unbalanced datasets. Parameters ---------- class_weight : dict, list of dicts, "auto", or None, optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. The "auto" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data. For multi-output, the weights of each column of y will be multiplied. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Array of original class labels per sample. indices : array-like, shape (n_subsample,), or None Array of indices to be used in a subsample. Can be of length less than n_samples in the case of a subsample, or equal to n_samples in the case of a bootstrap subsample with repeated indices. If None, the sample weight will be calculated over the full sample. Only "auto" is supported for class_weight if this is provided. Returns ------- sample_weight_vect : ndarray, shape (n_samples,) Array with sample weights as applied to the original y """ y = np.atleast_1d(y) if y.ndim == 1: y = np.reshape(y, (-1, 1)) n_outputs = y.shape[1] if isinstance(class_weight, six.string_types): if class_weight != 'auto': raise ValueError('The only valid preset for class_weight is ' '"auto". Given "%s".' 
% class_weight) elif (indices is not None and not isinstance(class_weight, six.string_types)): raise ValueError('The only valid class_weight for subsampling is ' '"auto". Given "%s".' % class_weight) elif n_outputs > 1: if (not hasattr(class_weight, "__iter__") or isinstance(class_weight, dict)): raise ValueError("For multi-output, class_weight should be a " "list of dicts, or a valid string.") if len(class_weight) != n_outputs: raise ValueError("For multi-output, number of elements in " "class_weight should match number of outputs.") expanded_class_weight = [] for k in range(n_outputs): y_full = y[:, k] classes_full = np.unique(y_full) classes_missing = None if class_weight == 'auto' or n_outputs == 1: class_weight_k = class_weight else: class_weight_k = class_weight[k] if indices is not None: # Get class weights for the subsample, covering all classes in # case some labels that were present in the original data are # missing from the sample. y_subsample = y[indices, k] classes_subsample = np.unique(y_subsample) weight_k = np.choose(np.searchsorted(classes_subsample, classes_full), compute_class_weight(class_weight_k, classes_subsample, y_subsample), mode='clip') classes_missing = set(classes_full) - set(classes_subsample) else: weight_k = compute_class_weight(class_weight_k, classes_full, y_full) weight_k = weight_k[np.searchsorted(classes_full, y_full)] if classes_missing: # Make missing classes' weight zero weight_k[in1d(y_full, list(classes_missing))] = 0. expanded_class_weight.append(weight_k) expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64) return expanded_class_weight
bsd-3-clause
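A small sketch of the two helpers above on an imbalanced toy label vector. It follows the positional signatures and the 'auto' mode exactly as written in this file; newer scikit-learn releases renamed the mode to 'balanced' and made the arguments keyword-only.

import numpy as np
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight

y = np.array([0, 0, 0, 0, 1, 1])   # class 0 is twice as frequent as class 1
classes = np.unique(y)

# 'auto': weights inversely proportional to class frequency, normalised by their mean.
print(compute_class_weight('auto', classes, y))

# Explicit per-class weights: dict keys must be labels present in `classes` (see the check above).
print(compute_class_weight({0: 1.0, 1: 5.0}, classes, y))

# Per-sample expansion: every sample receives the weight of its class.
print(compute_sample_weight('auto', y))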
bodacea/opendatatools
ckandump/ckan_calls.py
1
3584
#!/usr/bin/env python ''' Add and access data in CKAN2.0 instance (e.g. datahub.io) References include: * http://docs.ckan.org/en/ckan-2.0/api.html * http://ckan.readthedocs.org/en/ckan-1.7.1/using-data-api.html Sara-Jayne Terp, 2013 ''' import urllib2 import urllib import json import pickle import ckanclient import pprint ''' call_ckan_api: Sara-Jayne Terp, 2013 ''' #Globals ckan = None def call_ckan_api(ckanurl, apikey, apicall, data): # Make the HTTP request. data_string = urllib.quote(json.dumps(data)) headers = {'Authorization': apikey} req = urllib2.Request(ckanurl+'api/3/'+apicall, data_string, headers) response = urllib2.urlopen(req) # Use the json module to load CKAN's response into a dictionary. ## assert response.code == 200 response_dict = json.loads(response.read()) # Check the contents of the response. ## assert response_dict['success'] is True result = response_dict['result'] ## pprint.pprint(result) return(result) def check_ckan_package(ckanurl, apikey, packagename, ownername): action = "action/package_show" ckandata = {'name':packagename,'owner_org':ownername} result = call_ckan_api(ckanurl, apikey, action, ckandata) return(result) def create_ckan_package(ckanurl, apikey, packagename, ownername): action = "action/package_create" ckandata = {'name':packagename,'owner_org':ownername} result = call_ckan_api(ckanurl, apikey, action, ckandata) return(result) def create_ckan_resource(ckanurl, apikey, data, resourcename, packagename, ownername): action = "action/resource_create" #NB Must put owner_org in here, or call will fail ckandata = {'name':resourcename, 'package_id':packagename, 'owner_org':ownername} ckandata.update(data) result = call_ckan_api(ckanurl, apikey, action, ckandata) return(result) #Read in CKAN and google keys def read_keys(keyfile): fin = open(keyfile, 'rb') ckankeys = {} googlekeys = {} ckankeys['url'] = fin.readline().strip() ckankeys['apikey'] = fin.readline().strip() googlekeys['user'] = fin.readline().strip() googlekeys['pass'] = fin.readline().strip() googlekeys['doc'] = fin.readline().strip() fin.close() return(ckankeys, googlekeys) def dump_ckan_to_pickle(keyfile): #Connect [ckankeys, googlekeys] = read_keys(keyfile) fout = open("pickled_ckan_contents.pk1", "wb") ckan = ckanclient.CkanClient( base_location=ckankeys['url']+'api', api_key=ckankeys['apikey']) #tag list tag_list = ckan.tag_register_get() pickle.dump(tag_list, fout, -1) #force pickle to use highest protocol available #packages package_entities = {} package_list = ckan.package_register_get() print package_list for package_name in package_list: ckan.package_entity_get(package_name) package_entities[package_name] = ckan.last_message pickle.dump(package_entities, fout, -1) #groups groups = {} group_list = ckan.group_register_get() print group_list for group_name in group_list: groups[group_name] = ckan.group_entity_get(group_name) pickle.dump(groups, fout, -1) ###datasets ##datasets = {} ##dataset_list = ckan.dataset_register_get() ##for dataset_name in dataset_list: ## datasets[dataset_name] = ckan.dataset_entity_get(dataset_name) ##pickle.dump(datasets, fout, -1) fout.close() return() def reverse_ckan_pickle(filename): fin = open(filename, 'rb') tags = pickle.load(fin) packages = pickle.load(fin) groups = pickle.load(fin) fin.close() return tags, packages, groups
gpl-3.0
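The module above wraps CKAN's action API (package_create, resource_create) behind small helper functions. A hypothetical driver script using those helpers might look like the sketch below; the key file name, organisation and package names are all made up, and a real CKAN instance plus API key are required for the calls to succeed.

# Sketch only: assumes this file is importable as ckan_calls and that keys.txt
# follows the layout read_keys() expects (CKAN URL, API key, then Google credentials).
from ckan_calls import read_keys, create_ckan_package, create_ckan_resource

ckankeys, googlekeys = read_keys('keys.txt')

# Create an (illustrative) empty package owned by an organisation.
package = create_ckan_package(ckankeys['url'], ckankeys['apikey'],
                              packagename='example-dataset',
                              ownername='example-org')

# Attach a resource to it; extra fields pass through the `data` dict untouched.
resource = create_ckan_resource(ckankeys['url'], ckankeys['apikey'],
                                data={'url': 'http://example.com/data.csv',
                                      'format': 'CSV'},
                                resourcename='example-resource',
                                packagename='example-dataset',
                                ownername='example-org')
print(package['name'], resource['id'])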
cauchycui/scikit-learn
examples/linear_model/plot_logistic_l1_l2_sparsity.py
377
2601
""" ============================================== L1 Penalty and Sparsity in Logistic Regression ============================================== Comparison of the sparsity (percentage of zero coefficients) of solutions when L1 and L2 penalty are used for different values of C. We can see that large values of C give more freedom to the model. Conversely, smaller values of C constrain the model more. In the L1 penalty case, this leads to sparser solutions. We classify 8x8 images of digits into two classes: 0-4 against 5-9. The visualization shows coefficients of the models for varying C. """ print(__doc__) # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn import datasets from sklearn.preprocessing import StandardScaler digits = datasets.load_digits() X, y = digits.data, digits.target X = StandardScaler().fit_transform(X) # classify small against large digits y = (y > 4).astype(np.int) # Set regularization parameter for i, C in enumerate((100, 1, 0.01)): # turn down tolerance for short training time clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01) clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01) clf_l1_LR.fit(X, y) clf_l2_LR.fit(X, y) coef_l1_LR = clf_l1_LR.coef_.ravel() coef_l2_LR = clf_l2_LR.coef_.ravel() # coef_l1_LR contains zeros due to the # L1 sparsity inducing norm sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100 sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100 print("C=%.2f" % C) print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR) print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y)) print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR) print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y)) l1_plot = plt.subplot(3, 2, 2 * i + 1) l2_plot = plt.subplot(3, 2, 2 * (i + 1)) if i == 0: l1_plot.set_title("L1 penalty") l2_plot.set_title("L2 penalty") l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest', cmap='binary', vmax=1, vmin=0) plt.text(-8, 3, "C = %.2f" % C) l1_plot.set_xticks(()) l1_plot.set_yticks(()) l2_plot.set_xticks(()) l2_plot.set_yticks(()) plt.show()
bsd-3-clause
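The plotting in the example above can obscure the core computation; the sparsity comparison itself is only a few lines. A stripped-down sketch on the same data and parameters, without matplotlib; solver='liblinear' is spelled out explicitly here, which is what the example relies on implicitly for the L1 penalty.

import numpy as np
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

digits = load_digits()
X = StandardScaler().fit_transform(digits.data)
y = (digits.target > 4).astype(int)        # small digits (0-4) vs large digits (5-9)

for C in (100, 1, 0.01):
    for penalty in ('l1', 'l2'):
        clf = LogisticRegression(C=C, penalty=penalty, tol=0.01, solver='liblinear')
        clf.fit(X, y)
        sparsity = np.mean(clf.coef_.ravel() == 0) * 100
        print("C=%-6s penalty=%s  zero coefficients: %5.1f%%  score: %.3f"
              % (C, penalty, sparsity, clf.score(X, y)))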
lorenzo-desantis/mne-python
examples/inverse/plot_label_activation_from_stc.py
50
1949
""" ================================================== Extracting time course from source_estimate object ================================================== Load a SourceEstimate object from stc files and extract the time course of activation in individual labels, as well as in a complex label formed through merging two labels. """ # Author: Christian Brodbeck <christianbrodbeck@nyu.edu> # # License: BSD (3-clause) import os import mne from mne.datasets import sample import matplotlib.pyplot as plt print(__doc__) data_path = sample.data_path() os.environ['SUBJECTS_DIR'] = data_path + '/subjects' meg_path = data_path + '/MEG/sample' # load the stc stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg') # load the labels aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label') aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label') # extract the time course for different labels from the stc stc_lh = stc.in_label(aud_lh) stc_rh = stc.in_label(aud_rh) stc_bh = stc.in_label(aud_lh + aud_rh) # calculate center of mass and transform to mni coordinates vtx, _, t_lh = stc_lh.center_of_mass('sample') mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0] vtx, _, t_rh = stc_rh.center_of_mass('sample') mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0] # plot the activation plt.figure() plt.axes([.1, .275, .85, .625]) hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0] hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0] hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0] plt.xlabel('Time (s)') plt.ylabel('Source amplitude (dSPM)') plt.xlim(stc.times[0], stc.times[-1]) # add a legend including center-of-mass mni coordinates to the plot labels = ['LH: center of mass = %s' % mni_lh.round(2), 'RH: center of mass = %s' % mni_rh.round(2), 'Combined LH & RH'] plt.figlegend([hl, hr, hb], labels, 'lower center') plt.suptitle('Average activation in auditory cortex labels', fontsize=20) plt.show()
bsd-3-clause
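A condensed sketch of the label handling shown above, without the matplotlib figure: labels can be added together, and SourceEstimate.in_label restricts the estimate to the vertices of a label. It assumes the MNE sample dataset (downloaded on first use) and the same file layout as the example.

import mne
from mne.datasets import sample

data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'

stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')

# Adding two labels yields a combined (bilateral) label; in_label keeps only its vertices.
stc_bh = stc.in_label(aud_lh + aud_rh)

# Mean activation time course over the combined auditory label.
mean_tc = stc_bh.data.mean(0)
print(mean_tc.shape, stc.times[0], stc.times[-1])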
cauchycui/scikit-learn
sklearn/utils/__init__.py
131
14185
""" The :mod:`sklearn.utils` module includes various utilities. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, assert_all_finite, check_random_state, column_or_1d, check_array, check_consistent_length, check_X_y, indexable, check_symmetric, DataConversionWarning) from .class_weight import compute_class_weight, compute_sample_weight from ..externals.joblib import cpu_count __all__ = ["murmurhash3_32", "as_float_array", "assert_all_finite", "check_array", "check_random_state", "compute_class_weight", "compute_sample_weight", "column_or_1d", "safe_indexing", "check_consistent_length", "check_X_y", 'indexable', "check_symmetric"] class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from sklearn.utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <sklearn.utils.deprecated object at ...> >>> @deprecated() ... def some_function(): pass """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. def __init__(self, extra=''): """ Parameters ---------- extra: string to be added to the deprecation messages """ self.extra = extra def __call__(self, obj): if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask: array Mask to be used on X. Returns ------- mask """ mask = np.asarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def safe_indexing(X, indices): """Return items or rows from X using indices. Allows simple indexing of lists or arrays. Parameters ---------- X : array-like, sparse-matrix, list. Data from which to sample rows or items. indices : array-like, list Indices according to which X will be subsampled. """ if hasattr(X, "iloc"): # Pandas Dataframes and Series try: return X.iloc[indices] except ValueError: # Cython typed memoryviews internally used in pandas do not support # readonly buffers. 
warnings.warn("Copying input dataframe for slicing.", DataConversionWarning) return X.copy().iloc[indices] elif hasattr(X, "shape"): if hasattr(X, 'take') and (hasattr(indices, 'dtype') and indices.dtype.kind == 'i'): # This is often substantially faster than X[indices] return X.take(indices, axis=0) else: return X[indices] else: return [X[idx] for idx in indices] def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- resampled_arrays : sequence of indexable data-structures Sequence of resampled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples if max_n_samples > n_samples: raise ValueError("Cannot sample %d out of arrays with dim %d" % ( max_n_samples, n_samples)) check_consistent_length(*arrays) if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] # convert sparse matrices to CSR for row-based indexing arrays = [a.tocsr() if issparse(a) else a for a in arrays] resampled_arrays = [safe_indexing(a, indices) for a in arrays] if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int or RandomState instance Control the shuffling for reproducible behavior. 
n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : array like, matrix, sparse matrix copy : boolean, optional, default True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). Returns ------- X ** 2 : element wise square """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_batches(n, batch_size): """Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] """ start = 0 for _ in range(int(n // batch_size)): end = start + batch_size yield slice(start, end) start = end if start < n: yield slice(start, n) def gen_even_slices(n, n_packs, n_samples=None): """Generator to create n_packs slices going up to n. Pass n_samples when the slices are to be used for sparse matrix indexing; slicing off-the-end raises an exception, while it works for NumPy arrays. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 if n_packs < 1: raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start = end def _get_n_jobs(n_jobs): """Get number of jobs for the computation. This function reimplements the logic of joblib to determine the actual number of jobs depending on the cpu count. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. 
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Parameters ---------- n_jobs : int Number of jobs stated in joblib convention. Returns ------- n_jobs : int The actual number of jobs as positive integer. Examples -------- >>> from sklearn.utils import _get_n_jobs >>> _get_n_jobs(4) 4 >>> jobs = _get_n_jobs(-2) >>> assert jobs == max(cpu_count() - 1, 1) >>> _get_n_jobs(0) Traceback (most recent call last): ... ValueError: Parameter n_jobs == 0 has no meaning. """ if n_jobs < 0: return max(cpu_count() + 1 + n_jobs, 1) elif n_jobs == 0: raise ValueError('Parameter n_jobs == 0 has no meaning.') else: return n_jobs def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x) class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems""" class DataDimensionalityWarning(UserWarning): """Custom warning to notify potential issues with data dimensionality"""
bsd-3-clause
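A few of the helpers above in one place: consistent shuffling, bootstrap resampling, minibatch slicing with gen_batches, and safe_indexing, which indexes arrays, lists and DataFrames uniformly (later scikit-learn releases made it private as _safe_indexing). A minimal sketch assuming only NumPy and scikit-learn:

import numpy as np
from sklearn.utils import gen_batches, resample, safe_indexing, shuffle

X = np.arange(20).reshape(10, 2)
y = np.arange(10)

# Shuffle several containers consistently (alias for resample(..., replace=False)).
X_s, y_s = shuffle(X, y, random_state=0)

# One bootstrap draw: same length as the input, sampled with replacement.
X_b, y_b = resample(X, y, random_state=0)

# Minibatch slices; the last one may be shorter when batch_size does not divide n.
for batch in gen_batches(X.shape[0], 4):
    print(X_s[batch].shape)        # (4, 2), (4, 2), (2, 2)

# Uniform row/item indexing for arrays and plain lists.
print(safe_indexing(list('abcdefghij'), [0, 3, 5]))   # ['a', 'd', 'f']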
AlexRobson/scikit-learn
sklearn/utils/__init__.py
131
14185
""" The :mod:`sklearn.utils` module includes various utilities. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, assert_all_finite, check_random_state, column_or_1d, check_array, check_consistent_length, check_X_y, indexable, check_symmetric, DataConversionWarning) from .class_weight import compute_class_weight, compute_sample_weight from ..externals.joblib import cpu_count __all__ = ["murmurhash3_32", "as_float_array", "assert_all_finite", "check_array", "check_random_state", "compute_class_weight", "compute_sample_weight", "column_or_1d", "safe_indexing", "check_consistent_length", "check_X_y", 'indexable', "check_symmetric"] class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from sklearn.utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <sklearn.utils.deprecated object at ...> >>> @deprecated() ... def some_function(): pass """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. def __init__(self, extra=''): """ Parameters ---------- extra: string to be added to the deprecation messages """ self.extra = extra def __call__(self, obj): if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask: array Mask to be used on X. Returns ------- mask """ mask = np.asarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def safe_indexing(X, indices): """Return items or rows from X using indices. Allows simple indexing of lists or arrays. Parameters ---------- X : array-like, sparse-matrix, list. Data from which to sample rows or items. indices : array-like, list Indices according to which X will be subsampled. """ if hasattr(X, "iloc"): # Pandas Dataframes and Series try: return X.iloc[indices] except ValueError: # Cython typed memoryviews internally used in pandas do not support # readonly buffers. 
warnings.warn("Copying input dataframe for slicing.", DataConversionWarning) return X.copy().iloc[indices] elif hasattr(X, "shape"): if hasattr(X, 'take') and (hasattr(indices, 'dtype') and indices.dtype.kind == 'i'): # This is often substantially faster than X[indices] return X.take(indices, axis=0) else: return X[indices] else: return [X[idx] for idx in indices] def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- resampled_arrays : sequence of indexable data-structures Sequence of resampled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples if max_n_samples > n_samples: raise ValueError("Cannot sample %d out of arrays with dim %d" % ( max_n_samples, n_samples)) check_consistent_length(*arrays) if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] # convert sparse matrices to CSR for row-based indexing arrays = [a.tocsr() if issparse(a) else a for a in arrays] resampled_arrays = [safe_indexing(a, indices) for a in arrays] if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int or RandomState instance Control the shuffling for reproducible behavior. 
n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : array like, matrix, sparse matrix copy : boolean, optional, default True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). Returns ------- X ** 2 : element wise square """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_batches(n, batch_size): """Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] """ start = 0 for _ in range(int(n // batch_size)): end = start + batch_size yield slice(start, end) start = end if start < n: yield slice(start, n) def gen_even_slices(n, n_packs, n_samples=None): """Generator to create n_packs slices going up to n. Pass n_samples when the slices are to be used for sparse matrix indexing; slicing off-the-end raises an exception, while it works for NumPy arrays. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 if n_packs < 1: raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start = end def _get_n_jobs(n_jobs): """Get number of jobs for the computation. This function reimplements the logic of joblib to determine the actual number of jobs depending on the cpu count. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. 
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Parameters ---------- n_jobs : int Number of jobs stated in joblib convention. Returns ------- n_jobs : int The actual number of jobs as positive integer. Examples -------- >>> from sklearn.utils import _get_n_jobs >>> _get_n_jobs(4) 4 >>> jobs = _get_n_jobs(-2) >>> assert jobs == max(cpu_count() - 1, 1) >>> _get_n_jobs(0) Traceback (most recent call last): ... ValueError: Parameter n_jobs == 0 has no meaning. """ if n_jobs < 0: return max(cpu_count() + 1 + n_jobs, 1) elif n_jobs == 0: raise ValueError('Parameter n_jobs == 0 has no meaning.') else: return n_jobs def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x) class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems""" class DataDimensionalityWarning(UserWarning): """Custom warning to notify potential issues with data dimensionality"""
bsd-3-clause
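The resampling helpers above are easiest to follow from a short driver. A minimal sketch, assuming only the public sklearn.utils API shown in the docstrings (resample, shuffle, gen_batches); the array contents are made up for illustration.

import numpy as np
from sklearn.utils import gen_batches, resample, shuffle

X = np.arange(10).reshape(5, 2)
y = np.array([0, 1, 0, 1, 0])

# One bootstrap step: sample 5 rows with replacement, keeping X and y aligned.
X_boot, y_boot = resample(X, y, replace=True, n_samples=5, random_state=0)

# Random permutation: shuffle is resample(..., replace=False) under the hood.
X_perm, y_perm = shuffle(X, y, random_state=0)

# Mini-batch slices covering all 5 rows in chunks of 2 (the last batch is shorter).
for batch in gen_batches(X.shape[0], 2):
    print(batch, X[batch].shape)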
smartscheduling/scikit-learn-categorical-tree
examples/feature_selection/plot_rfe_with_cross_validation.py
225
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
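The example above tunes the number of features by cross-validation; when the target count is fixed, plain RFE exposes the same selection mechanics with less machinery. A minimal sketch using only stable scikit-learn APIs (RFE, support_, ranking_); the dataset shape is chosen for illustration and is not the one used above.

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                           random_state=0)

# Keep exactly 3 features, eliminating one feature per step.
rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=3, step=1)
rfe.fit(X, y)

print("selected feature mask:", rfe.support_)
print("feature ranking (1 = kept):", rfe.ranking_)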
AlexRobson/scikit-learn
sklearn/tests/test_base.py
215
7045
# Author: Gael Varoquaux # License: BSD 3 clause import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.base import BaseEstimator, clone, is_classifier from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import deprecated ############################################################################# # A few test classes class MyEstimator(BaseEstimator): def __init__(self, l1=0, empty=None): self.l1 = l1 self.empty = empty class K(BaseEstimator): def __init__(self, c=None, d=None): self.c = c self.d = d class T(BaseEstimator): def __init__(self, a=None, b=None): self.a = a self.b = b class DeprecatedAttributeEstimator(BaseEstimator): def __init__(self, a=None, b=None): self.a = a if b is not None: DeprecationWarning("b is deprecated and renamed 'a'") self.a = b @property @deprecated("Parameter 'b' is deprecated and renamed to 'a'") def b(self): return self._b class Buggy(BaseEstimator): " A buggy estimator that does not set its parameters right. " def __init__(self, a=None): self.a = 1 class NoEstimator(object): def __init__(self): pass def fit(self, X=None, y=None): return self def predict(self, X=None): return None class VargEstimator(BaseEstimator): """Sklearn estimators shouldn't have vargs.""" def __init__(self, *vargs): pass ############################################################################# # The tests def test_clone(): # Tests that clone creates a correct deep copy. # We create an estimator, make a copy of its original state # (which, in this case, is the current state of the estimator), # and check that the obtained copy is a correct deep copy. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) new_selector = clone(selector) assert_true(selector is not new_selector) assert_equal(selector.get_params(), new_selector.get_params()) selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) new_selector = clone(selector) assert_true(selector is not new_selector) def test_clone_2(): # Tests that clone doesn't copy everything. # We first create an estimator, give it an own attribute, and # make a copy of its original state. Then we check that the copy doesn't # have the specific attribute we manually added to the initial estimator. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) selector.own_attribute = "test" new_selector = clone(selector) assert_false(hasattr(new_selector, "own_attribute")) def test_clone_buggy(): # Check that clone raises an error on buggy estimators. 
buggy = Buggy() buggy.a = 2 assert_raises(RuntimeError, clone, buggy) no_estimator = NoEstimator() assert_raises(TypeError, clone, no_estimator) varg_est = VargEstimator() assert_raises(RuntimeError, clone, varg_est) def test_clone_empty_array(): # Regression test for cloning estimators with empty arrays clf = MyEstimator(empty=np.array([])) clf2 = clone(clf) assert_array_equal(clf.empty, clf2.empty) clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) clf2 = clone(clf) assert_array_equal(clf.empty.data, clf2.empty.data) def test_clone_nan(): # Regression test for cloning estimators with default parameter as np.nan clf = MyEstimator(empty=np.nan) clf2 = clone(clf) assert_true(clf.empty is clf2.empty) def test_repr(): # Smoke test the repr of the base estimator. my_estimator = MyEstimator() repr(my_estimator) test = T(K(), K()) assert_equal( repr(test), "T(a=K(c=None, d=None), b=K(c=None, d=None))" ) some_est = T(a=["long_params"] * 1000) assert_equal(len(repr(some_est)), 415) def test_str(): # Smoke test the str of the base estimator my_estimator = MyEstimator() str(my_estimator) def test_get_params(): test = T(K(), K()) assert_true('a__d' in test.get_params(deep=True)) assert_true('a__d' not in test.get_params(deep=False)) test.set_params(a__d=2) assert_true(test.a.d == 2) assert_raises(ValueError, test.set_params, a__a=2) def test_get_params_deprecated(): # deprecated attribute should not show up as params est = DeprecatedAttributeEstimator(a=1) assert_true('a' in est.get_params()) assert_true('a' in est.get_params(deep=True)) assert_true('a' in est.get_params(deep=False)) assert_true('b' not in est.get_params()) assert_true('b' not in est.get_params(deep=True)) assert_true('b' not in est.get_params(deep=False)) def test_is_classifier(): svc = SVC() assert_true(is_classifier(svc)) assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]}))) assert_true(is_classifier(Pipeline([('svc', svc)]))) assert_true(is_classifier(Pipeline([('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))]))) def test_set_params(): # test nested estimator parameter setting clf = Pipeline([("svc", SVC())]) # non-existing parameter in svc assert_raises(ValueError, clf.set_params, svc__stupid_param=True) # non-existing parameter of pipeline assert_raises(ValueError, clf.set_params, svm__stupid_param=True) # we don't currently catch if the things in pipeline are estimators # bad_pipeline = Pipeline([("bad", NoEstimator())]) # assert_raises(AttributeError, bad_pipeline.set_params, # bad__stupid_param=True) def test_score_sample_weight(): from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn import datasets rng = np.random.RandomState(0) # test both ClassifierMixin and RegressorMixin estimators = [DecisionTreeClassifier(max_depth=2), DecisionTreeRegressor(max_depth=2)] sets = [datasets.load_iris(), datasets.load_boston()] for est, ds in zip(estimators, sets): est.fit(ds.data, ds.target) # generate random sample weights sample_weight = rng.randint(1, 10, size=len(ds.target)) # check that the score with and without sample weights are different assert_not_equal(est.score(ds.data, ds.target), est.score(ds.data, ds.target, sample_weight=sample_weight), msg="Unweighted and weighted scores " "are unexpectedly equal")
bsd-3-clause
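The tests above pin down the estimator contract that clone and get_params rely on: __init__ stores its arguments verbatim, get_params round-trips them, and clone copies parameters but not fitted state. A minimal sketch of that contract with a hypothetical Scaler estimator (not part of scikit-learn):

from sklearn.base import BaseEstimator, clone


class Scaler(BaseEstimator):
    """Toy estimator: __init__ only stores its parameters."""
    def __init__(self, factor=1.0, offset=0.0):
        self.factor = factor
        self.offset = offset

    def fit(self, X, y=None):
        self.fitted_ = True
        return self


est = Scaler(factor=2.0).fit([[1.0], [2.0]])
copy = clone(est)

print(est.get_params())                        # {'factor': 2.0, 'offset': 0.0}
print(copy.get_params() == est.get_params())   # True: parameters survive clone
print(hasattr(copy, 'fitted_'))                # False: fitted state does not
copy.set_params(offset=1.5)                    # nested keys like 'a__d' also work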
smartscheduling/scikit-learn-categorical-tree
examples/cluster/plot_agglomerative_clustering_metrics.py
388
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
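The scale-invariance argument in the docstring above can be checked numerically in a few lines. A minimal sketch, assuming only sklearn.metrics.pairwise_distances; the two "waveforms" are made-up vectors, one a scaled copy of the other.

import numpy as np
from sklearn.metrics import pairwise_distances

a = np.array([[1.0, 0.0, 2.0, 0.0]])
b = 3 * a  # proportional, like waveform 1 vs. waveform 2 above

for metric in ("cosine", "euclidean", "cityblock"):
    d = pairwise_distances(a, b, metric=metric)[0, 0]
    print("%-10s %.3f" % (metric, d))
# cosine distance is 0 (scale-invariant), while euclidean and cityblock grow
# with the scale factor -- the effect the clustering example visualizes.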
wanghaven/nupic
examples/opf/experiments/spatial_classification/category_0/description.py
32
1598
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

## This file defines parameters for a prediction experiment.

import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription

# the sub-experiment configuration
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/category_0.csv'),
  'errorMetric': 'avg_err',

  'modelParams': {
    'sensorParams': { 'verbosity': 0},
    'clParams': {
      'clVerbosity': 0,
    },
  }
}

mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
agpl-3.0
aestrivex/mne-python
examples/realtime/plot_compute_rt_decoder.py
8
3603
""" ======================= Decoding real-time data ======================= Supervised machine learning applied to MEG data in sensor space. Here the classifier is updated every 5 trials and the decoding accuracy is plotted """ # Authors: Mainak Jas <mainak@neuro.hut.fi> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne.realtime import MockRtClient, RtEpochs from mne.datasets import sample print(__doc__) # Fiff file to simulate the realtime client data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' raw = mne.io.Raw(raw_fname, preload=True) tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) tr_percent = 60 # Training percentage min_trials = 10 # minimum trials after which decoding should start # select gradiometers picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=True, exclude=raw.info['bads']) # create the mock-client object rt_client = MockRtClient(raw) # create the real-time epochs object rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks, decim=1, reject=dict(grad=4000e-13, eog=150e-6)) # start the acquisition rt_epochs.start() # send raw buffers rt_client.send_data(rt_epochs, picks, tmin=0, tmax=90, buffer_size=1000) # Decoding in sensor space using a linear SVM n_times = len(rt_epochs.times) from sklearn import preprocessing # noqa from sklearn.svm import SVC # noqa from sklearn.pipeline import Pipeline # noqa from sklearn.cross_validation import cross_val_score, ShuffleSplit # noqa from mne.decoding import ConcatenateChannels, FilterEstimator # noqa scores_x, scores, std_scores = [], [], [] filt = FilterEstimator(rt_epochs.info, 1, 40) scaler = preprocessing.StandardScaler() concatenator = ConcatenateChannels() clf = SVC(C=1, kernel='linear') concat_classifier = Pipeline([('filter', filt), ('concat', concatenator), ('scaler', scaler), ('svm', clf)]) data_picks = mne.pick_types(rt_epochs.info, meg='grad', eeg=False, eog=True, stim=False, exclude=raw.info['bads']) for ev_num, ev in enumerate(rt_epochs.iter_evoked()): print("Just got epoch %d" % (ev_num + 1)) if ev_num == 0: X = ev.data[None, data_picks, :] y = int(ev.comment) # the comment attribute contains the event_id else: X = np.concatenate((X, ev.data[None, data_picks, :]), axis=0) y = np.append(y, int(ev.comment)) if ev_num >= min_trials: cv = ShuffleSplit(len(y), 5, test_size=0.2, random_state=42) scores_t = cross_val_score(concat_classifier, X, y, cv=cv, n_jobs=1) * 100 std_scores.append(scores_t.std()) scores.append(scores_t.mean()) scores_x.append(ev_num) # Plot accuracy plt.clf() plt.plot(scores_x, scores, '+', label="Classif. score") plt.hold(True) plt.plot(scores_x, scores) plt.axhline(50, color='k', linestyle='--', label="Chance level") hyp_limits = (np.asarray(scores) - np.asarray(std_scores), np.asarray(scores) + np.asarray(std_scores)) plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1], color='b', alpha=0.5) plt.xlabel('Trials') plt.ylabel('Classification score (% correct)') plt.xlim([min_trials, 50]) plt.ylim([30, 105]) plt.title('Real-time decoding') plt.show(block=False) plt.pause(0.01) plt.show()
bsd-3-clause
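The decoding loop above boils down to cross-validating a scale-then-SVM pipeline on a growing trial matrix. A minimal sketch of that core, with random stand-in data instead of MEG epochs, the MNE-specific filter/concatenation steps dropped, and the current sklearn.model_selection API in place of the deprecated cross_validation module used in the example.

import numpy as np
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.RandomState(42)
X = rng.randn(40, 60)            # 40 "trials" x 60 flattened features
y = rng.randint(0, 2, 40)        # binary labels, e.g. auditory vs. visual

clf = Pipeline([('scaler', StandardScaler()),
                ('svm', SVC(C=1, kernel='linear'))])
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
scores = cross_val_score(clf, X, y, cv=cv) * 100
print("accuracy: %.1f%% +/- %.1f%%" % (scores.mean(), scores.std()))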
fbossy/SickRage
lib/guessit/transfo/guess_date.py
29
2355
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals

from guessit.containers import DefaultValidator
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.date import search_date


class GuessDate(Transformer):
    def __init__(self):
        Transformer.__init__(self, 50)

    def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
        naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
                                 help='If short date is found, consider the first digits as the year.')
        naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
                                 help='If short date is found, consider the second digits as the day.')

    def supported_properties(self):
        return ['date']

    @staticmethod
    def guess_date(string, node=None, options=None):
        date, span = search_date(string,
                                 options.get('date_year_first') if options else False,
                                 options.get('date_day_first') if options else False)
        if date and span and DefaultValidator.validate_string(string, span):
            # ensure we have a separator before and after date
            return {'date': date}, span
        return None, None

    def process(self, mtree, options=None):
        GuessFinder(self.guess_date, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves())
gpl-3.0
cauchycui/scikit-learn
examples/cluster/plot_kmeans_silhouette_analysis.py
240
5885
""" =============================================================================== Selecting the number of clusters with silhouette analysis on KMeans clustering =============================================================================== Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1]. Silhoette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. In this example the silhouette analysis is used to choose an optimal value for ``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4. Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the ``n_clusters`` is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. """ from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np print(__doc__) # Generating the sample data from make_blobs # This particular setting has one distict cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # The vertical line for average silhoutte score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 2nd Plot showing the actual clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors) # Labeling the clusters centers = clusterer.cluster_centers_ # Draw white circles at cluster centers ax2.scatter(centers[:, 0], centers[:, 1], marker='o', c="white", alpha=1, s=200) for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50) ax2.set_title("The visualization of the clustered data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle(("Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold') plt.show()
bsd-3-clause
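Stripped of the plotting, the model-selection logic in the example above reduces to comparing average silhouette scores across candidate n_clusters. A minimal sketch using the same data-generating parameters; the per-sample silhouette plots are what the averages alone cannot show.

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X, _ = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1,
                  center_box=(-10.0, 10.0), shuffle=True, random_state=1)

for n_clusters in (2, 3, 4, 5, 6):
    labels = KMeans(n_clusters=n_clusters, random_state=10).fit_predict(X)
    print(n_clusters, round(silhouette_score(X, labels), 3))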
wanghaven/nupic
tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py
32
16827
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by the OPF Experiment Generator to generate the actual description.py file by replacing $XXXXXXXX tokens with desired values. This description.py file was generated by: '/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' """ from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI from nupic.frameworks.opf.expdescriptionhelpers import ( updateConfigFromSubConfig, applyValueGettersToContainer, DeferredDictLookup) from nupic.frameworks.opf.clamodelcallbacks import * from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.opfutils import (InferenceType, InferenceElement) from nupic.support import aggregationDivide from nupic.frameworks.opf.opftaskdriver import ( IterationPhaseSpecLearnOnly, IterationPhaseSpecInferOnly, IterationPhaseSpecLearnAndInfer) # Model Configuration Dictionary: # # Define the model parameters and adjust for any modifications if imported # from a sub-experiment. # # These fields might be modified by a sub-experiment; this dict is passed # between the sub-experiment and base experiment # # # NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements # within the config dictionary may be assigned futures derived from the # ValueGetterBase class, such as DeferredDictLookup. # This facility is particularly handy for enabling substitution of values in # the config dictionary from other values in the config dictionary, which is # needed by permutation.py-based experiments. These values will be resolved # during the call to applyValueGettersToContainer(), # which we call after the base experiment's config dictionary is updated from # the sub-experiment. See ValueGetterBase and # DeferredDictLookup for more details about value-getters. # # For each custom encoder parameter to be exposed to the sub-experiment/ # permutation overrides, define a variable in this section, using key names # beginning with a single underscore character to avoid collisions with # pre-defined keys (e.g., _dsEncoderFieldName2_N). # # Example: # config = dict( # _dsEncoderFieldName2_N = 70, # _dsEncoderFieldName2_W = 5, # dsEncoderSchema = [ # base=dict( # fieldname='Name2', type='ScalarEncoder', # name='Name2', minval=0, maxval=270, clipInput=True, # n=DeferredDictLookup('_dsEncoderFieldName2_N'), # w=DeferredDictLookup('_dsEncoderFieldName2_W')), # ], # ) # updateConfigFromSubConfig(config) # applyValueGettersToContainer(config) config = { # Type of model that the rest of these parameters apply to. 
'model': "CLA", # Version that specifies the format of the config. 'version': 1, # Intermediate variables used to compute fields in modelParams and also # referenced from the control section. 'aggregationInfo': { 'days': 0, 'fields': [], 'hours': 0, 'microseconds': 0, 'milliseconds': 0, 'minutes': 0, 'months': 0, 'seconds': 0, 'weeks': 0, 'years': 0}, 'predictAheadTime': None, # Model parameter dictionary. 'modelParams': { # The type of inference that this model will perform 'inferenceType': 'TemporalNextStep', 'sensorParams': { # Sensor diagnostic output verbosity control; # if > 0: sensor region will print out on screen what it's sensing # at each step 0: silent; >=1: some info; >=2: more info; # >=3: even more info (see compute() in py/regions/RecordSensor.py) 'verbosity' : 0, # Example: # dsEncoderSchema = [ # DeferredDictLookup('__field_name_encoder'), # ], # # (value generated from DS_ENCODER_SCHEMA) 'encoders': { u'A': { 'fieldname': u'daynight', 'n': 300, 'name': u'daynight', 'type': 'SDRCategoryEncoder', 'w': 21}, u'B': { 'fieldname': u'daynight', 'n': 300, 'name': u'daynight', 'type': 'SDRCategoryEncoder', 'w': 21}, u'C': { 'fieldname': u'precip', 'n': 300, 'name': u'precip', 'type': 'SDRCategoryEncoder', 'w': 21}, u'D': { 'clipInput': True, 'fieldname': u'visitor_winloss', 'maxval': 0.78600000000000003, 'minval': 0.0, 'n': 150, 'name': u'visitor_winloss', 'type': 'AdaptiveScalarEncoder', 'w': 21}, u'E': { 'clipInput': True, 'fieldname': u'home_winloss', 'maxval': 0.69999999999999996, 'minval': 0.0, 'n': 150, 'name': u'home_winloss', 'type': 'AdaptiveScalarEncoder', 'w': 21}, u'F': { 'dayOfWeek': (7, 1), 'fieldname': u'timestamp', 'name': u'timestamp_dayOfWeek', 'type': 'DateEncoder'}, u'G': { 'fieldname': u'timestamp', 'name': u'timestamp_timeOfDay', 'timeOfDay': (7, 1), 'type': 'DateEncoder'}, u'pred': { 'clipInput': True, 'fieldname': u'attendance', 'maxval': 36067, 'minval': 0, 'n': 150, 'name': u'attendance', 'type': 'AdaptiveScalarEncoder', 'w': 21}}, # A dictionary specifying the period for automatically-generated # resets from a RecordSensor; # # None = disable automatically-generated resets (also disabled if # all of the specified values evaluate to 0). # Valid keys is the desired combination of the following: # days, hours, minutes, seconds, milliseconds, microseconds, weeks # # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), # # (value generated from SENSOR_AUTO_RESET) 'sensorAutoReset' : None, }, 'spEnable': True, 'spParams': { # SP diagnostic output verbosity control; # 0: silent; >=1: some info; >=2: more info; 'spVerbosity' : 0, 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, 'inputWidth': 0, # SP inhibition control (absolute value); # Maximum number of active columns in the SP region's output (when # there are more, the weaker ones are suppressed) 'numActiveColumnsPerInhArea': 40, 'seed': 1956, # potentialPct # What percent of the columns's receptive field is available # for potential synapses. At initialization time, we will # choose potentialPct * (2*potentialRadius+1)^2 'potentialPct': 1.0, # The default connected threshold. Any synapse whose # permanence value is above the connected threshold is # a "connected synapse", meaning it can contribute to the # cell's firing. Typical value is 0.10. 
Cells whose activity # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. # (This concept applies to both SP and TP and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, 'synPermActiveInc': 0.1, 'synPermInactiveDec': 0.01, }, # Controls whether TP is enabled or disabled; # TP is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tpEnable' : True, 'tpParams': { # TP diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, # The number of cells (i.e., states), allocated per column. 'cellsPerColumn': 32, 'inputWidth': 2048, 'seed': 1960, # Temporal Pooler implementation selector (see _getTPClass in # CLARegion.py). 'temporalImp': 'cpp', # New Synapse formation count # NOTE: If None, use spNumActivePerInhArea # # TODO: need better explanation 'newSynapseCount': 15, # Maximum number of synapses per segment # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, # Maximum number of segments per cell # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, # Initial Permanence # TODO: need better explanation 'initialPerm': 0.21, # Permanence Increment 'permanenceInc': 0.1, # Permanence Decrement # If set to None, will automatically default to tpPermanenceInc # value. 'permanenceDec' : 0.1, 'globalDecay': 0.0, 'maxAge': 0, # Minimum number of active synapses for a segment to be considered # during search for the best-matching segments. # None=use default # Replaces: tpMinThreshold 'minThreshold': 12, # Segment activation threshold. # A segment is active if it has >= tpSegmentActivationThreshold # connected synapses that are active due to infActiveState # None=use default # Replaces: tpActivationThreshold 'activationThreshold': 16, 'outputType': 'normal', # "Pay Attention Mode" length. This tells the TP how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 'pamLength': 1, }, 'clParams': { 'regionName' : 'CLAClassifierRegion', # Classifier diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'clVerbosity' : 0, # This controls how fast the classifier learns/forgets. Higher values # make it adapt faster and forget older patterns faster. 'alpha': 0.001, # This is set after the call to updateConfigFromSubConfig and is # computed from the aggregationInfo and predictAheadTime. 'steps': '1', }, 'trainSPNetOnlyIfRequested': False, }, } # end of config dictionary # Adjust base config dictionary for any modifications if imported from a # sub-experiment updateConfigFromSubConfig(config) # Compute predictionSteps based on the predictAheadTime and the aggregation # period, which may be permuted over. 
if config['predictAheadTime'] is not None: predictionSteps = int(round(aggregationDivide( config['predictAheadTime'], config['aggregationInfo']))) assert (predictionSteps >= 1) config['modelParams']['clParams']['steps'] = str(predictionSteps) # Adjust config by applying ValueGetterBase-derived # futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) control = { # The environment that the current model is being run in "environment": 'nupic', # Input stream specification per py/nupicengine/cluster/database/StreamDef.json. # 'dataset' : { u'info': u'baseball benchmark test', u'streams': [ { u'columns': [ u'daynight', u'precip', u'home_winloss', u'visitor_winloss', u'attendance', u'timestamp'], u'info': u'OAK01.csv', u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}], u'version': 1}, # Iteration count: maximum number of iterations. Each iteration corresponds # to one record from the (possibly aggregated) dataset. The task is # terminated when either number of iterations reaches iterationCount or # all records in the (possibly aggregated) database have been processed, # whichever occurs first. # # iterationCount of -1 = iterate over the entire dataset #'iterationCount' : ITERATION_COUNT, # Metrics: A list of MetricSpecs that instantiate the metrics that are # computed for this experiment 'metrics':[ MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='aae', params={'window': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='trivial_aae', params={'window': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}), MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction, metric='nupicScore_scalar', params={'frequencyWindow': 1000}) ], # Logged Metrics: A sequence of regular expressions that specify which of # the metrics from the Inference Specifications section MUST be logged for # every prediction. The regex's correspond to the automatically generated # metric labels. This is similar to the way the optimization metric is # specified in permutations.py. 'loggedMetrics': ['.*nupicScore.*'], } descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, control=control)
agpl-3.0
briancappello/PyTradeLib
pytradelib/quandl/wiki.py
1
2556
import os
import sys

from pandas.io.common import urlencode as _encode_url

from pytradelib.downloader import Downloader
from pytradelib.utils import _sanitize_dates, csv_to_df


class QuandlDailyWikiProvider(object):
    def __init__(self, api_key=None, batch_size=20, sleep=20):
        self._api_key = api_key
        self._downloader = Downloader(batch_size=batch_size, sleep=sleep)

    @property
    def api_key(self):
        return self._api_key

    @api_key.setter
    def api_key(self, api_key):
        self._api_key = api_key

    def download(self, symbols, start=None, end=None):
        if isinstance(symbols, str):
            url = self._construct_url(symbols, start, end)
            csv = self._downloader.download(url)
            return csv_to_df(csv)
        elif isinstance(symbols, (list, tuple)):
            urls = [self._construct_url(symbol, start, end)
                    for symbol in symbols]
        elif isinstance(symbols, dict):
            urls = [self._construct_url(symbol, d['start'], d['end'])
                    for symbol, d in symbols.items()]
        else:
            raise Exception('symbols must be a string, a list of strings, or a dict of string to start/end dates')

        results = {}
        for url, csv in self._downloader.download(urls):
            symbol, df = self._url_to_symbol(url), csv_to_df(csv)
            results[symbol] = df
            print('parsed results for ' + symbol)
        return results

    def _construct_url(self, symbol, start=None, end=None):
        """ Get historical data for the given name from quandl.
        Date format is datetime

        Returns a DataFrame.
        """
        start, end = _sanitize_dates(start, end)

        # if no specific dataset was provided, default to free WIKI dataset
        if '/' not in symbol:
            symbol = 'WIKI/' + symbol
        url = 'https://www.quandl.com/api/v3/datasets/%s.csv?' % symbol

        query_params = {'start_date': start.strftime('%Y-%m-%d'),
                        'end_date': end.strftime('%Y-%m-%d'),
                        'collapse': 'daily'}
        if self._api_key or 'QUANDL_API_KEY' in os.environ:
            query_params['api_key'] = self._api_key or os.environ['QUANDL_API_KEY']
        else:
            print('Please provide your API key in the constructor, or set the QUANDL_API_KEY environment variable')
            sys.exit(1)

        return url + _encode_url(query_params)

    def _url_to_symbol(self, url):
        return url[url.rfind('/')+1:url.rfind('.csv')]
gpl-3.0
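The part of the provider above most worth isolating is the URL it builds. A standard-library-only sketch of that URL shape, not a drop-in replacement for the class; the symbol and API key are placeholders.

from datetime import date
from urllib.parse import urlencode

symbol = 'WIKI/AAPL'   # the free WIKI dataset is assumed when no prefix is given
params = {'start_date': date(2015, 1, 1).strftime('%Y-%m-%d'),
          'end_date': date(2015, 12, 31).strftime('%Y-%m-%d'),
          'collapse': 'daily',
          'api_key': 'YOUR_KEY_HERE'}
print('https://www.quandl.com/api/v3/datasets/%s.csv?' % symbol + urlencode(params))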
NelisVerhoef/scikit-learn
examples/cluster/plot_kmeans_digits.py
228
4524
""" =========================================================== A demo of K-Means clustering on the handwritten digits data =========================================================== In this example we compare the various initialization strategies for K-means in terms of runtime and quality of the results. As the ground truth is known here, we also apply different cluster quality metrics to judge the goodness of fit of the cluster labels to the ground truth. Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for definitions and discussions of the metrics): =========== ======================================================== Shorthand full name =========== ======================================================== homo homogeneity score compl completeness score v-meas V measure ARI adjusted Rand index AMI adjusted mutual information silhouette silhouette coefficient =========== ======================================================== """ print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from sklearn import metrics from sklearn.cluster import KMeans from sklearn.datasets import load_digits from sklearn.decomposition import PCA from sklearn.preprocessing import scale np.random.seed(42) digits = load_digits() data = scale(digits.data) n_samples, n_features = data.shape n_digits = len(np.unique(digits.target)) labels = digits.target sample_size = 300 print("n_digits: %d, \t n_samples %d, \t n_features %d" % (n_digits, n_samples, n_features)) print(79 * '_') print('% 9s' % 'init' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_k_means(estimator, name, data): t0 = time() estimator.fit(data) print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labels, estimator.labels_), metrics.completeness_score(labels, estimator.labels_), metrics.v_measure_score(labels, estimator.labels_), metrics.adjusted_rand_score(labels, estimator.labels_), metrics.adjusted_mutual_info_score(labels, estimator.labels_), metrics.silhouette_score(data, estimator.labels_, metric='euclidean', sample_size=sample_size))) bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10), name="k-means++", data=data) bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10), name="random", data=data) # in this case the seeding of the centers is deterministic, hence we run the # kmeans algorithm only once with n_init=1 pca = PCA(n_components=n_digits).fit(data) bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1), name="PCA-based", data=data) print(79 * '_') ############################################################################### # Visualize the results on PCA-reduced data reduced_data = PCA(n_components=2).fit_transform(data) kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10) kmeans.fit(reduced_data) # Step size of the mesh. Decrease to increase the quality of the VQ. h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max]. # Plot the decision boundary. For that, we will assign a color to each x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1 y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Obtain labels for each point in mesh. Use last trained model. 
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1) plt.clf() plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower') plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2) # Plot the centroids as a white X centroids = kmeans.cluster_centers_ plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10) plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n' 'Centroids are marked with white cross') plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) plt.show()
bsd-3-clause
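Condensed to a single metric, the benchmark above compares three initializations of KMeans on the scaled digits. A minimal sketch reporting only inertia and V-measure; it follows the example in running the PCA-seeded variant with n_init=1 because that seeding is deterministic.

from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.metrics import v_measure_score
from sklearn.preprocessing import scale

digits = load_digits()
data = scale(digits.data)
n_digits = len(set(digits.target))

inits = [('k-means++', 'k-means++', 10),
         ('random', 'random', 10),
         ('PCA-based', PCA(n_components=n_digits).fit(data).components_, 1)]
for name, init, n_init in inits:
    km = KMeans(init=init, n_clusters=n_digits, n_init=n_init).fit(data)
    print(name, int(km.inertia_), round(v_measure_score(digits.target, km.labels_), 3))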
alexeyum/scikit-learn
sklearn/tests/test_grid_search.py
66
28856
""" Testing for grid search module (sklearn.grid_search) """ from collections import Iterable, Sized from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.externals.six.moves import xrange from itertools import chain, product import pickle import warnings import sys import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.externals.six.moves import zip from sklearn.base import BaseEstimator from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.exceptions import ChangedBehaviorWarning from sklearn.exceptions import FitFailedWarning with warnings.catch_warnings(): warnings.simplefilter('ignore') from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV, ParameterGrid, ParameterSampler) from sklearn.cross_validation import KFold, StratifiedKFold from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert_equal(list(grid), [grid[i] for i in range(len(grid))]) def test_parameter_grid(): # Test basic properties of ParameterGrid. 
params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert_true(isinstance(grid1, Iterable)) assert_true(isinstance(grid1, Sized)) assert_equal(len(grid1), 3) assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert_equal(len(grid2), 6) # loop to assert we can iterate over the grid multiple times for i in xrange(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert_equal(points, set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert_equal(len(empty), 1) assert_equal(list(empty), [{}]) assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert_equal(len(has_empty), 4) assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}]) assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert_equal(grid_search.best_estimator_.foo_param, 2) for i, foo_i in enumerate([1, 2, 3]): assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i}) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. 
clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert_equal(grid_search_no_score.best_params_, grid_search.best_params_) # check that we can call score and that it gives the correct result assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y)) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc').fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y) score_accuracy = assert_warns(ChangedBehaviorWarning, search_accuracy.score, X, y) score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score, X, y) score_auc = assert_warns(ChangedBehaviorWarning, search_auc.score, X, y) # ensure the test is sane assert_true(score_auc < 1.0) assert_true(score_accuracy < 1.0) assert_not_equal(score_auc, score_accuracy) assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_trivial_grid_scores(): # Test search over a "grid" with only one point. # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}) grid_search.fit(X, y) assert_true(hasattr(grid_search, "grid_scores_")) random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1) random_search.fit(X, y) assert_true(hasattr(random_search, "grid_scores_")) def test_no_refit(): # Test that grid search can be used for model selection only clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False) grid_search.fit(X, y) assert_true(hasattr(grid_search, "best_params_")) def test_grid_search_error(): # Test that grid search will capture errors on data with different # length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_iid(): # test the iid parameter # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. 
mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other svm = SVC(kernel='linear') # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] # once with iid=True (default) grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # for first split, 1/4 of dataset is in test, for second 3/4. # take weighted average assert_almost_equal(first.mean_validation_score, 1 * 1. / 4. + 1. / 3. * 3. / 4.) # once with iid=False grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv, iid=False) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) # scores are the same as above assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # averaged score is just mean of scores assert_almost_equal(first.mean_validation_score, np.mean(first.cv_validation_scores)) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC() cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_bad_param_grid(): param_dict = {"C": 1.0} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": np.ones(6).reshape(3, 2)} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_true(np.mean(y_pred == y_pred2) >= .9) assert_equal(C, C2) def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert_equal(C, C2) # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert_equal(C, C3) assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = 
make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert_true(cv.best_score_ >= 0) # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert_true(np.mean(y_pred == y_test) >= 0) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) def test_grid_search_precomputed_kernel_error_kernel_function(): # Test that grid search returns an error when using a kernel_function X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) kernel_function = lambda x1, x2: np.dot(x1, x2.T) clf = SVC(kernel=kernel_function) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_, y_) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert_true(not hasattr(self, 'has_been_fit_')) self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier(check_X=check_X, check_y=check_y) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_X=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_y=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert_true(hasattr(grid_search, "grid_scores_")) def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(random_state=0) km = KMeans(random_state=0) grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='adjusted_rand_score') grid_search.fit(X, y) # ARI can find the right number :) assert_equal(grid_search.best_params_["n_clusters"], 3) # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert_equal(grid_search.best_params_["n_clusters"], 4) def test_gridsearch_no_predict(): # test grid-search with an estimator without predict. 
# slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert_equal(search.best_params_['bandwidth'], .1) assert_equal(search.best_score_, 42) def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert_equal(len(samples), 10) for sample in samples: assert_true(sample["kernel"] in ["rbf", "linear"]) assert_true(0 <= sample["C"] <= 1) def test_randomized_search_grid_scores(): # Make a dataset with a lot of noise to get various kind of prediction # errors across CV folds and parameter settings X, y = make_classification(n_samples=200, n_features=100, n_informative=3, random_state=0) # XXX: as of today (scipy 0.12) it's not possible to set the random seed # of scipy.stats distributions: the assertions in this test should thus # not depend on the randomization params = dict(C=expon(scale=10), gamma=expon(scale=0.1)) n_cv_iter = 3 n_search_iter = 30 search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter, param_distributions=params, iid=False) search.fit(X, y) assert_equal(len(search.grid_scores_), n_search_iter) # Check consistency of the structure of each cv_score item for cv_score in search.grid_scores_: assert_equal(len(cv_score.cv_validation_scores), n_cv_iter) # Because we set iid to False, the mean_validation score is the # mean of the fold mean scores instead of the aggregate sample-wise # mean score assert_almost_equal(np.mean(cv_score.cv_validation_scores), cv_score.mean_validation_score) assert_equal(list(sorted(cv_score.parameters.keys())), list(sorted(params.keys()))) # Check the consistency with the best_score_ and best_params_ attributes sorted_grid_scores = list(sorted(search.grid_scores_, key=lambda x: x.mean_validation_score)) best_score = sorted_grid_scores[-1].mean_validation_score assert_equal(search.best_score_, best_score) tied_best_params = [s.parameters for s in sorted_grid_scores if s.mean_validation_score == best_score] assert_true(search.best_params_ in tied_best_params, "best_params_={0} is not part of the" " tied best models: {1}".format( search.best_params_, tied_best_params)) def test_grid_search_score_consistency(): # test that correct scores are used clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in ['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score) grid_search.fit(X, y) cv = StratifiedKFold(n_folds=3, y=y) for C, scores in zip(Cs, grid_search.grid_scores_): clf.set_params(C=C) scores = scores[2] # get the separate runs from grid scores i = 0 for train, test in cv: clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, scores[i]) i += 1 def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True) grid_search.fit(X, y) pickle.dumps(grid_search) # smoke test random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, 
refit=True, n_iter=3) random_search.fit(X, y) pickle.dumps(random_search) # smoke test def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold(y.shape[0], random_state=0) estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) for parameters, _, cv_validation_scores in grid_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) for parameters, _, cv_validation_scores in random_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert_false(hasattr(gs, "predict_proba")) def test_grid_search_allows_nans(): # Test GridSearchCV with Imputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. 
assert all(np.all(this_point.cv_validation_scores == 0.0) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) assert all(np.all(np.isnan(this_point.cv_validation_scores)) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise error if n_iter too large params = {'first': [0, 1], 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params, n_iter=7) assert_raises(ValueError, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=6) samples = list(sampler) assert_equal(len(samples), 6) for values in ParameterGrid(params): assert_true(values in samples) # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert_equal(len(samples), 99) hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert_equal(len(set(hashable_samples)), 99) # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert_equal(len(samples), 7)
bsd-3-clause
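The iid-weighting behaviour that the first test above exercises can be reproduced by hand. The sketch below recomputes the expected `mean_validation_score` from the two fold scores and test-fold sizes quoted in the test's comments; the variable names are mine, not part of the test suite.

```python
import numpy as np

# Fold scores and test-fold fractions taken from the test's comments:
# a perfect score on the fold that holds out 1/4 of the data,
# and 1/3 on the fold that holds out the remaining 3/4.
fold_scores = np.array([1.0, 1.0 / 3.0])
test_fractions = np.array([0.25, 0.75])

iid_mean = np.average(fold_scores, weights=test_fractions)  # iid=True behaviour
plain_mean = fold_scores.mean()                             # iid=False behaviour

print(iid_mean)    # 1 * 1/4 + 1/3 * 3/4 = 0.5
print(plain_mean)  # (1 + 1/3) / 2 = 0.666...
```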
snnn/tensorflow
tensorflow/contrib/eager/python/evaluator.py
26
13672
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Evaluator holds Metrics for the duration of an evaluation run.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.contrib.eager.python import datasets from tensorflow.contrib.eager.python import metrics from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import summary_ops_v2 as summary_ops class Evaluator(object): """This holds and updates Metrics for the duration of a single eval run. Usage: evaluator = my_model.evaluator() # or MyEvaluator(my_model) for example_batch in ...: evaluator(example_batch) results = evaluator.all_metric_results(optional_summary_logdir) Or, if you are getting your examples from a tf.data.Dataset, you can use the evaluate_on_dataset() method. Implementers of Evaluators should (a) Call `track_metric()` and/or `track_evaluator()` in __init__(). (b) Override the `call()` method. It will be passed the output of the model's `eval_data()` method, and should call its contained metrics (treating them as callables) and any child Evaluators (using their call() method to avoid calling eval_data() again). Args: model: A `Model` object with an `eval_data()` method. """ def __init__(self, model): self._model = model self._metrics = {} self._evaluators = {} if not context.executing_eagerly(): self.call = function.defun(self.call) # ---- API for users ---- def __call__(self, *args, **kwargs): """Update metrics with a minibatch of input examples. Args: *args: **kwargs: Arguments representing an input mini-batch of examples to pass to self.model.eval_data(). Returns: The op to execute or None if executing eagerly. """ return self.call(self._model.eval_data(*args, **kwargs)) def init_variables(self): """Return an op for initializing all contained uninitialized variables. Only for graph execution. Should be called after variables are created in the first execution of __call__(). Returns: An op. Raises: RuntimeError: if eager execution is enabled. @compatibility(eager) Only for graph execution. @end_compatibility """ if context.executing_eagerly(): raise RuntimeError("Evaluator.init_variables() not needed when " "eager execution is enabled.") return control_flow_ops.group([m.init_variables() for _, m in self.metrics]) def all_metric_results(self, summary_logdir=None): """Computes results for all contained metrics. Args: summary_logdir: An optional string. If specified, metric results will be written as summaries to this directory. Returns: A `dict` mapping string names to tensors. 
""" if summary_logdir is None: with summary_ops.never_record_summaries(): return self._all_metric_results() else: def f(): with summary_ops.create_file_writer( summary_logdir).as_default(), summary_ops.always_record_summaries(): return self._all_metric_results() if context.executing_eagerly(): return f() else: return function.defun(f)() def _all_metric_results(self): """Implementation of `all_metric_results` in the summary context.""" results = {} for name, metric in six.iteritems(self._metrics): results[name] = metric.result() for prefix, evaluator in six.iteritems(self._evaluators): for name, metric in six.iteritems(evaluator._metrics): # pylint: disable=protected-access results[prefix + "/" + name] = metric.result() return results def evaluate_on_dataset(self, dataset, *args, **kwargs): """Convenience method for performing an eval on a Dataset. Args: dataset: Dataset object with the input data to evaluate on. *args: **kwargs: Optional additional arguments to __call__(), except `summary_logdir`: if specified, metrics will be written as summaries to this directory. Returns: @compatibility(eager) When eager execution is enabled, this returns the result of performing an evaluation as a dictionary. With graph execution, this returns a tuple (init_op, call_op, results_op) which may be executed using this code: ```python sess.run(init_op) try: while True: sess.run(call_op) except tf.errors.OutOfRangeError: pass return sess.run(results_op) # A dictionary # equivalently: return evaluator.run_evaluation(init_op, call_op, results_op, sess=sess) ``` @end_compatibility """ summary_logdir = kwargs.pop("summary_logdir", None) if context.executing_eagerly(): for example in datasets.Iterator(dataset): self.__call__(example, *args, **kwargs) return self.all_metric_results(summary_logdir) # Graph construction call_op = self.__call__(dataset.make_one_shot_iterator().get_next(), *args, **kwargs) init_op = self.init_variables() results_op = self.all_metric_results(summary_logdir) return (init_op, call_op, results_op) @staticmethod def run_evaluation(init_op, call_op, results_op, sess=None): """Convenience method for running the ops returned by evaluate_on_dataset. Args: init_op: An op that initializes/resets evaluation state. call_op: An op that updates evaluation state on a mini-batch of examples. Must generate an tf.errors.OutOfRangeError when done. results_op: A dictionary of tensors that compute the final evaluation results from the evaluation state. sess: The Session to run the evaluation in. Defaults to the default Session. Returns: A dictionary of values, parallel to results_op. Raises: RuntimeError: if eager execution is enabled. @compatibility(eager) Only for graph execution. @end_compatibility """ if context.executing_eagerly(): raise RuntimeError("Evaluator.run_evaluation() not supported when " "eager execution is enabled.") sess = sess or ops.get_default_session() sess.run(init_op) try: while True: sess.run(call_op) except errors_impl.OutOfRangeError: pass return sess.run(results_op) # ---- To be implemented by descendants --- def call(self, eval_data): """Update metrics using the output of self.model. Note: This function is executed as a graph function in graph mode. This means: a) Operations on the same resource are executed in textual order. This should make it easier to do things like add the updated value of a variable to another, for example. b) You don't need to worry about collecting the update ops to execute. All update ops added to the graph by this function will be executed. 
As a result, code should generally work the same way with graph or eager execution. Args: eval_data: The output of self.model.eval_data() on a mini-batch of examples. """ raise NotImplementedError("Evaluators must define a call member function.") # ---- For use by descendants --- @property def model(self): return self._model def track_metric(self, metric): """Add a Metric to be tracked. Metrics can only be tracked by one `Evaluator`. Metrics must be tracked or they will not appear in `all_metric_results()`. Args: metric: A `Metric` object. Returns: The `metric` passed into this function. Raises: RuntimeError: If called before __init__. TypeError: If `metric` is not of the correct type. ValueError: If there is a name collision between Metrics or `metric` has already been added to another `Evaluator`. """ if not hasattr(self, "_metrics"): raise RuntimeError( "Need to call Evaluator.__init__ before adding metrics") if not isinstance(metric, metrics.Metric): raise TypeError( "Evaluator.track_metric() passed type %s, not a tfe.metrics.Metric" % (type(metric),)) if metric.name in self._metrics: if metric is self._metrics[metric.name]: return metric raise ValueError( "Attempt to add two Metrics with the name '%s' to the same Evaluator " "'%s'" % (metric.name, self.name)) # pylint: disable=protected-access if hasattr(metric, "_added_to_an_evaluator"): raise ValueError("Metric %s already added to Evaluator %s" % (metric.name, metric._added_to_an_evaluator)) metric._added_to_an_evaluator = self.__class__.__name__ # pylint: enable=protected-access self._metrics[metric.name] = metric return metric def track_evaluator(self, prefix, evaluator): """Add a contained `Evaluator`. This is for delegating to another `Evaluator`, e.g. for when you have a model with multiple heads. Users should manually invoke the child `Evaluator`'s `call` method from their `call` method. Args: prefix: A string. Metrics from `evaluator` are exported with this prefix and a '/'. evaluator: An `Evaluator` object. Returns: The value of `evaluator` passed into this function. Raises: RuntimeError: If called before __init__. TypeError: If `evaluator` is not of the correct type. ValueError: If an `Evaluator` has already been added with that `prefix`. """ if not hasattr(self, "_evaluators"): raise RuntimeError( "Need to call Evaluator.__init__ before adding evaluators") if not isinstance(evaluator, Evaluator): raise TypeError( "Evaluator.track_evaluator() passed type %s, not a tfe.Evaluator." % (type(evaluator),)) if prefix in self._evaluators: if evaluator is self._evaluators[prefix]: return evaluator raise RuntimeError( "Attempt to add two Evaluators with the same prefix '%s'." % prefix) self._evaluators[prefix] = evaluator return evaluator @property def metric_variables(self): v = [] for metric in six.itervalues(self._metrics): v += metric.variables for evaluator in six.itervalues(self._evaluators): v += evaluator.metric_variables return v @property def metrics(self): """Returns a list of (prefix, metric) pairs.""" m = [] for metric in six.itervalues(self._metrics): m.append(("", metric)) for prefix, evaluator in six.iteritems(self._evaluators): m += [(prefix + "/" + p, m) for p, m in evaluator.metrics] return m class SparseSoftmaxEvaluator(Evaluator): """Evaluator for a sparse softmax model. Computes a standard set of metrics for single-label, multi-class models. 
Args: model: A `SparseSoftmaxModel` object or a `Model` whose `eval_data()` method produces a `dict` containing values for the loss, true label, predicted class, and optional weights. loss_key: Optional key for looking up the value of the loss in the `eval_data()` dict. Defaults to "loss". label_key: Optional key for looking up the value of the label in the `eval_data()` dict. Defaults to "label". predicted_class_key: Optional key for looking up the value of the predicted class in the `eval_data()` dict. Defaults to "predicted_class". weights_key: Optional key for looking up the value of the weights in the `eval_data()` dict. Defaults to "weights". Note that weights are optional, and default to 1 if not present in `eval_data`. """ def __init__(self, model, loss_key="loss", label_key="label", predicted_class_key="predicted_class", weights_key="weights"): super(SparseSoftmaxEvaluator, self).__init__(model) # TODO(josh11b): Expand this to include everything from the standard # SparseSoftmax Head. self.avg_loss = self.track_metric(metrics.Mean("Avg Loss")) self.accuracy = self.track_metric(metrics.Accuracy()) self.loss_key = loss_key self.label_key = label_key self.predicted_class_key = predicted_class_key self.weights_key = weights_key def call(self, eval_data): """Update metrics for `eval_data` dict (described above).""" weights = eval_data.get(self.weights_key, None) if weights is None: self.avg_loss(eval_data[self.loss_key]) self.accuracy(eval_data[self.label_key], eval_data[self.predicted_class_key]) else: self.avg_loss(eval_data[self.loss_key], weights=weights) self.accuracy(eval_data[self.label_key], eval_data[self.predicted_class_key], weights=weights)
apache-2.0
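The `Evaluator` docstring above spells out the implementer contract: register metrics with `track_metric()` in `__init__()` and override `call()`. The hypothetical subclass below is a minimal sketch of that contract, modelled on `SparseSoftmaxEvaluator`; it assumes the same `Evaluator` and `metrics` names from `evaluator.py` are importable, and that the model's `eval_data()` returns a dict with a `"loss"` entry; that key is an assumption for illustration, not something the module requires.

```python
from tensorflow.contrib.eager.python import metrics  # same import as evaluator.py


class MeanLossEvaluator(Evaluator):
    """Hypothetical minimal Evaluator: tracks only the average loss."""

    def __init__(self, model):
        super(MeanLossEvaluator, self).__init__(model)
        # (a) register metrics in __init__ via track_metric()
        self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))

    def call(self, eval_data):
        # (b) update tracked metrics from the output of model.eval_data(),
        # assumed here to be a dict carrying a "loss" entry.
        self.avg_loss(eval_data["loss"])
```

With eager execution enabled, `MeanLossEvaluator(my_model).evaluate_on_dataset(my_dataset)` would then return a dict keyed by `"Avg Loss"`, following the usage pattern in the class docstring (`my_model` and `my_dataset` are placeholders).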
lucidfrontier45/scikit-learn
examples/svm/plot_separating_hyperplane_unbalanced.py
5
1363
"""
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================

Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.

We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
"""
print __doc__

import numpy as np
import pylab as pl
from sklearn import svm

# we create clusters with 1000 and 100 points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
          0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)

# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)

w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]

# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)

ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]

# plot separating hyperplanes and samples
h0 = pl.plot(xx, yy, 'k-', label='no weights')
h1 = pl.plot(xx, wyy, 'k--', label='with weights')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.legend()

pl.axis('tight')
pl.show()
bsd-3-clause
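The plotting script above rearranges the hyperplane equation w . x + b = 0 into the line y = -(w[0] / w[1]) * x - b / w[1] (its `a * xx - clf.intercept_[0] / w[1]` expression). The standalone check below, with made-up coefficients rather than a fitted model, confirms that points generated this way do lie on the hyperplane.

```python
import numpy as np

# Made-up stand-ins for clf.coef_[0] and clf.intercept_[0]
w, b = np.array([0.3, -0.8]), 0.25

xx = np.linspace(-5, 5)
yy = -(w[0] / w[1]) * xx - b / w[1]   # same rearrangement as in the example

# Every (x, y) pair on the line satisfies w . p + b == 0 (up to float error)
print(np.allclose(np.c_[xx, yy].dot(w) + b, 0.0))  # True
```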
pandaproject/panda
panda/migrations/0031_rename_dataset_related_stories.py
6
14078
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.rename_column('panda_dataset', 'related_stories', 'related_links') def backwards(self, orm): db.rename_column('panda_dataset', 'related_links', 'related_stories') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'panda.activitylog': { 'Meta': {'unique_together': "(('user', 'when'),)", 'object_name': 'ActivityLog'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'panda.category': { 'Meta': {'object_name': 'Category'}, 'id': ('django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataset': { 'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'}, 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}), 'column_schema': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}), 'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}), 'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'locked_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'related_links': ('panda.fields.JSONField', [], {'default': '[]'}), 'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'}, 'columns': ('panda.fields.JSONField', [], {'null': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'dialect': ('panda.fields.JSONField', [], {'null': 'True'}), 'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'guessed_types': ('panda.fields.JSONField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'max_length': '256'}) }, 'panda.export': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'}, 
'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'max_length': '256'}) }, 'panda.notification': { 'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'read_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}), 'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'}), 'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}) }, 'panda.relatedupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'}, 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'max_length': '256'}) }, 'panda.searchlog': { 'Meta': {'object_name': 'SearchLog'}, 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'panda.searchsubscription': { 'Meta': {'object_name': 'SearchSubscription'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'search_subscriptions'", 'null': 'True', 'to': "orm['panda.Category']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'search_subscriptions'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_run': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'query_human': ('django.db.models.fields.TextField', [], {}), 'query_url': 
('django.db.models.fields.CharField', [], {'max_length': '256'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search_subscriptions'", 'to': "orm['auth.User']"}) }, 'panda.taskstatus': { 'Meta': {'object_name': 'TaskStatus'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': "orm['auth.User']"}), 'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}), 'task_description': ('django.db.models.fields.TextField', [], {}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}) }, 'panda.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'activation_key_expiration': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'show_login_help': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['panda']
mit
markusnagel/fuel
fuel/converters/__init__.py
2
1299
"""Data conversion modules for built-in datasets.

Conversion submodules generate an HDF5 file that is compatible with
their corresponding built-in dataset.

Conversion functions accept a single argument, `subparser`, which is an
`argparse.ArgumentParser` instance that it needs to fill with its own
specific arguments. They should set a `func` default argument for the
subparser with a function that will get called and given the parsed
command-line arguments, and is expected to download the required files.
"""
from fuel.converters import adult
from fuel.converters import binarized_mnist
from fuel.converters import caltech101_silhouettes
from fuel.converters import cifar10
from fuel.converters import cifar100
from fuel.converters import iris
from fuel.converters import mnist
from fuel.converters import svhn
from fuel.converters import ilsvrc2010

__version__ = '0.2'
all_converters = (
    ('adult', adult.fill_subparser),
    ('binarized_mnist', binarized_mnist.fill_subparser),
    ('caltech101_silhouettes', caltech101_silhouettes.fill_subparser),
    ('cifar10', cifar10.fill_subparser),
    ('cifar100', cifar100.fill_subparser),
    ('iris', iris.fill_subparser),
    ('mnist', mnist.fill_subparser),
    ('svhn', svhn.fill_subparser),
    ('ilsvrc2010', ilsvrc2010.fill_subparser))
mit
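The module docstring above defines the converter protocol: each `fill_subparser` receives an `argparse` subparser, adds its own arguments, and sets a `func` default pointing at the function that performs the work. The sketch below is a hypothetical converter written to that description; the `--output` flag and the `convert_mydataset` body are invented for illustration and are not part of fuel.

```python
def convert_mydataset(args):
    # `args` is the parsed command-line namespace handed over by the caller.
    # A real converter would write an HDF5 file here; this is only a stub.
    print('would convert the raw files into', args.output)


def fill_subparser(subparser):
    """Hypothetical converter following the protocol in the module docstring."""
    subparser.add_argument(
        '--output', default='mydataset.hdf5',
        help='Where to write the converted HDF5 file (invented flag).')
    subparser.set_defaults(func=convert_mydataset)
```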
xiawei0000/Kinectforactiondetect
TheanoDL/LogisticRegressionMNIST.py
2
15142
""" This tutorial introduces logistic regression using Theano and stochastic gradient descent. Logistic regression is a probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. Mathematically, this can be written as: .. math:: P(Y=i|x, W,b) &= softmax_i(W x + b) \\ &= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}} The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x). .. math:: y_{pred} = argmax_i P(Y=i|x,W,b) This tutorial presents a stochastic gradient descent optimization method suitable for large datasets, and a conjugate gradient optimization method that is suitable for smaller datasets. References: - textbooks: "Pattern Recognition and Machine Learning" - Christopher M. Bishop, section 4.3.2 """ __docformat__ = 'restructedtext en' import cPickle import gzip import os import sys import time import numpy import theano import theano.tensor as T class LogisticRegression(object): """Multi-class Logistic Regression Class The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. """ def __init__(self, input, n_in, n_out): """ Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) # initialize the baises b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # compute vector of class-membership probabilities in symbolic form self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) # compute prediction as class whose probability is maximal in # symbolic form self.y_pred = T.argmax(self.p_y_given_x, axis=1) # parameters of the model self.params = [self.W, self.b] def negative_log_likelihood(self, y): """Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) = \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\ \ell (\theta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size """ # y.shape[0] is (symbolically) the number of rows in y, i.e., # number of examples (call it n) in the minibatch # T.arange(y.shape[0]) is a symbolic vector which will contain # [0,1,2,... 
n-1] T.log(self.p_y_given_x) is a matrix of # Log-Probabilities (call it LP) with one row per example and # one column per class LP[T.arange(y.shape[0]),y] is a vector # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is # the mean (across minibatch examples) of the elements in v, # i.e., the mean log-likelihood across the minibatch. return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) def errors(self, y): """Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label """ # check if y has same dimension of y_pred if y.ndim != self.y_pred.ndim: raise TypeError('y should have the same shape as self.y_pred', ('y', target.type, 'y_pred', self.y_pred.type)) # check if y is of the correct datatype if y.dtype.startswith('int'): # the T.neq operator returns a vector of 0s and 1s, where 1 # represents a mistake in prediction return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError() def load_data(dataset): ''' Loads the dataset :type dataset: string :param dataset: the path to the dataset (here MNIST) ''' ############# # LOAD DATA # ############# # Download the MNIST dataset if it is not present data_dir, data_file = os.path.split(dataset) if data_dir == "" and not os.path.isfile(dataset): # Check if dataset is in the data directory. new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset) if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz': dataset = new_path if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz': import urllib origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz' print 'Downloading data from %s' % origin urllib.urlretrieve(origin, dataset) print '... loading data' # Load the dataset f = gzip.open(dataset, 'rb') train_set, valid_set, test_set = cPickle.load(f) f.close() #train_set, valid_set, test_set format: tuple(input, target) #input is an numpy.ndarray of 2 dimensions (a matrix) #witch row's correspond to an example. target is a #numpy.ndarray of 1 dimensions (vector)) that have the same length as #the number of rows in the input. It should give the target #target to the example with the same index in the input. def shared_dataset(data_xy, borrow=True): """ Function that loads the dataset into shared variables The reason we store our dataset in shared variables is to allow Theano to copy it into the GPU memory (when code is run on GPU). Since copying data into the GPU is slow, copying a minibatch everytime is needed (the default behaviour if the data is not in a shared variable) would lead to a large decrease in performance. """ data_x, data_y = data_xy shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow) shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow) # When storing data on the GPU it has to be stored as floats # therefore we will store the labels as ``floatX`` as well # (``shared_y`` does exactly that). But during our computations # we need them as ints (we use labels as index, and if they are # floats it doesn't make sense) therefore instead of returning # ``shared_y`` we will have to cast it to int. 
This little hack # lets ous get around this issue return shared_x, T.cast(shared_y, 'int32') test_set_x, test_set_y = shared_dataset(test_set) valid_set_x, valid_set_y = shared_dataset(valid_set) train_set_x, train_set_y = shared_dataset(train_set) rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)] return rval def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000, dataset='mnist.pkl.gz', batch_size=600): """ Demonstrate stochastic gradient descent optimization of a log-linear model This is demonstrated on MNIST. :type learning_rate: float :param learning_rate: learning rate used (factor for the stochastic gradient) :type n_epochs: int :param n_epochs: maximal number of epochs to run the optimizer :type dataset: string :param dataset: the path of the MNIST dataset file from http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz """ datasets = load_data(dataset) train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x, test_set_y = datasets[2] # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size ###################### # BUILD ACTUAL MODEL # ###################### print '... building the model' # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch x = T.matrix('x') # the data is presented as rasterized images y = T.ivector('y') # the labels are presented as 1D vector of # [int] labels # construct the logistic regression class # Each MNIST image has size 28*28 classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10) # the cost we minimize during training is the negative log likelihood of # the model in symbolic format cost = classifier.negative_log_likelihood(y) # compiling a Theano function that computes the mistakes that are made by # the model on a minibatch test_model = theano.function(inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * batch_size: (index + 1) * batch_size], y: test_set_y[index * batch_size: (index + 1) * batch_size]}) validate_model = theano.function(inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size:(index + 1) * batch_size], y: valid_set_y[index * batch_size:(index + 1) * batch_size]}) # compute the gradient of cost with respect to theta = (W,b) g_W = T.grad(cost=cost, wrt=classifier.W) g_b = T.grad(cost=cost, wrt=classifier.b) # specify how to update the parameters of the model as a list of # (variable, update expression) pairs. updates = [(classifier.W, classifier.W - learning_rate * g_W), (classifier.b, classifier.b - learning_rate * g_b)] # compiling a Theano function `train_model` that returns the cost, but in # the same time updates the parameter of the model based on the rules # defined in `updates` train_model = theano.function(inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * batch_size:(index + 1) * batch_size], y: train_set_y[index * batch_size:(index + 1) * batch_size]}) ############### # TRAIN MODEL # ############### print '... 
training the model' # early-stopping parameters patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_params = None best_validation_loss = numpy.inf test_score = 0. start_time = time.clock() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % \ (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # test it on the test set test_losses = [test_model(i) for i in xrange(n_test_batches)] test_score = numpy.mean(test_losses) print((' epoch %i, minibatch %i/%i, test error of best' ' model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) if patience <= iter: done_looping = True break end_time = time.clock() print(('Optimization complete with best validation score of %f %%,' 'with test performance %f %%') % (best_validation_loss * 100., test_score * 100.)) print 'The code run for %d epochs, with %f epochs/sec' % ( epoch, 1. * epoch / (end_time - start_time)) print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time))) if __name__ == '__main__': sgd_optimization_mnist()
mit
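The model equations in the tutorial's docstring, P(Y=i|x, W, b) = softmax_i(W x + b) and y_pred = argmax_i P(Y=i|x, W, b), can be checked numerically without Theano. The numbers below are made up purely for illustration; the shapes follow the tutorial's convention of `W` having shape (n_in, n_out).

```python
import numpy as np

x = np.array([0.5, -1.0])           # one example with n_in = 2 features
W = np.array([[0.1, 0.4, -0.3],
              [0.2, -0.5, 0.6]])    # shape (n_in, n_out) as in the tutorial
b = np.array([0.0, 0.1, -0.1])

z = x.dot(W) + b                    # the linear scores W x + b
p = np.exp(z) / np.exp(z).sum()     # softmax: class-membership probabilities
y_pred = p.argmax()                 # argmax over P(Y=i | x, W, b)

print(p, y_pred)
```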
npuichigo/ttsflow
third_party/tensorflow/tensorflow/examples/tutorials/input_fn/boston.py
75
2920
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DNNRegressor with custom input_fn for Housing dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import pandas as pd import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio", "medv"] FEATURES = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio"] LABEL = "medv" def get_input_fn(data_set, num_epochs=None, shuffle=True): return tf.estimator.inputs.pandas_input_fn( x=pd.DataFrame({k: data_set[k].values for k in FEATURES}), y=pd.Series(data_set[LABEL].values), num_epochs=num_epochs, shuffle=shuffle) def main(unused_argv): # Load datasets training_set = pd.read_csv("boston_train.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) test_set = pd.read_csv("boston_test.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) # Set of 6 examples for which to predict median house values prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True, skiprows=1, names=COLUMNS) # Feature cols feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES] # Build 2 layer fully connected DNN with 10, 10 units respectively. regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols, hidden_units=[10, 10], model_dir="/tmp/boston_model") # Train regressor.train(input_fn=get_input_fn(training_set), steps=5000) # Evaluate loss over one epoch of test_set. ev = regressor.evaluate( input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False)) loss_score = ev["loss"] print("Loss: {0:f}".format(loss_score)) # Print out predictions over a slice of prediction_set. y = regressor.predict( input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False)) # .predict() returns an iterator of dicts; convert to a list and print # predictions predictions = list(p["predictions"] for p in itertools.islice(y, 6)) print("Predictions: {}".format(str(predictions))) if __name__ == "__main__": tf.app.run()
apache-2.0
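`get_input_fn()` in the script above wraps pandas frames with `tf.estimator.inputs.pandas_input_fn`. If the data arrived as plain numpy arrays instead, the same-era `numpy_input_fn` offers an equivalent helper; the sketch below assumes that situation (the feature names are reused from the script, the helper name is mine).

```python
import numpy as np
import tensorflow as tf

FEATURES = ["crim", "zn", "indus", "nox", "rm", "age", "dis", "tax", "ptratio"]


def get_numpy_input_fn(feature_arrays, labels, num_epochs=None, shuffle=True):
    """Same shape of helper as get_input_fn, but for a dict of ndarrays."""
    return tf.estimator.inputs.numpy_input_fn(
        x={k: np.asarray(feature_arrays[k]) for k in FEATURES},
        y=np.asarray(labels),
        num_epochs=num_epochs,
        shuffle=shuffle)
```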
alexeyum/scikit-learn
sklearn/metrics/tests/test_ranking.py
31
41905
from __future__ import division, print_function import numpy as np from itertools import product import warnings from scipy.sparse import csr_matrix from sklearn import datasets from sklearn import svm from sklearn import ensemble from sklearn.datasets import make_multilabel_classification from sklearn.random_projection import sparse_random_matrix from sklearn.utils.validation import check_array, check_consistent_length from sklearn.utils.validation import check_random_state from sklearn.utils.testing import assert_raises, clean_warning_registry from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.metrics import auc from sklearn.metrics import average_precision_score from sklearn.metrics import coverage_error from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import precision_recall_curve from sklearn.metrics import label_ranking_loss from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from sklearn.exceptions import UndefinedMetricWarning ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred ############################################################################### # Tests def _auc(y_true, y_score): """Alternative implementation to check for correctness of `roc_auc_score`.""" pos_label = np.unique(y_true)[1] # Count the number of times positive samples are correctly ranked above # negative samples. pos = y_score[y_true == pos_label] neg = y_score[y_true != pos_label] diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) n_correct = np.sum(diff_matrix > 0) return n_correct / float(len(pos) * len(neg)) def _average_precision(y_true, y_score): """Alternative implementation to check for correctness of `average_precision_score`.""" pos_label = np.unique(y_true)[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_score = y_score[order] y_true = y_true[order] score = 0 for i in range(len(y_score)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. 
prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec return score / n_pos def test_roc_curve(): # Test Area under Receiver Operating Characteristic (ROC) curve y_true, _, probas_pred = make_prediction(binary=True) expected_auc = _auc(y_true, probas_pred) for drop in [True, False]: fpr, tpr, thresholds = roc_curve(y_true, probas_pred, drop_intermediate=drop) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, expected_auc, decimal=2) assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred)) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_end_points(): # Make sure that roc_curve returns a curve start at 0 and ending and # 1 even in corner cases rng = np.random.RandomState(0) y_true = np.array([0] * 50 + [1] * 50) y_pred = rng.randint(3, size=100) fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True) assert_equal(fpr[0], 0) assert_equal(fpr[-1], 1) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thr.shape) def test_roc_returns_consistency(): # Test whether the returned threshold matches up with tpr # make small toy dataset y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred) # use the given thresholds to determine the tpr tpr_correct = [] for t in thresholds: tp = np.sum((probas_pred >= t) & y_true) p = np.sum(y_true) tpr_correct.append(1.0 * tp / p) # compare tpr and tpr_correct to see if the thresholds' order was correct assert_array_almost_equal(tpr, tpr_correct, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_nonrepeating_thresholds(): # Test to ensure that we don't return spurious repeating thresholds. # Duplicated thresholds can arise due to machine precision issues. dataset = datasets.load_digits() X = dataset['data'] y = dataset['target'] # This random forest classifier can only return probabilities # significant to two decimal places clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0) # How well can the classifier predict whether a digit is less than 5? 
# This task contributes floating point roundoff errors to the probabilities train, test = slice(None, None, 2), slice(1, None, 2) probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test]) y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here y_true = [yy < 5 for yy in y[test]] # Check for repeating values in the thresholds fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False) assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size) def test_roc_curve_multi(): # roc_curve not applicable for multi-class problems y_true, _, probas_pred = make_prediction(binary=False) assert_raises(ValueError, roc_curve, y_true, probas_pred) def test_roc_curve_confidence(): # roc_curve for confidence scores y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.90, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_hard(): # roc_curve for hard decisions y_true, pred, probas_pred = make_prediction(binary=True) # always predict one trivial_pred = np.ones(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # always predict zero trivial_pred = np.zeros(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # hard decisions fpr, tpr, thresholds = roc_curve(y_true, pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.78, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_one_label(): y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # assert there are warnings w = UndefinedMetricWarning fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred) # all true labels, all fpr should be nan assert_array_equal(fpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # assert there are warnings fpr, tpr, thresholds = assert_warns(w, roc_curve, [1 - x for x in y_true], y_pred) # all negative labels, all tpr should be nan assert_array_equal(tpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_toydata(): # Binary classification y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) 
y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, .5) y_true = [0, 0] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no positive sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [0., 0.5, 1.]) assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) y_true = [1, 1] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no negative sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [np.nan, np.nan]) assert_array_almost_equal(fpr, [0.5, 1.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.) y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5) def test_roc_curve_drop_intermediate(): # Test that drop_intermediate drops the correct thresholds y_true = [0, 0, 0, 0, 1, 1] y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1., 0.7, 0.]) # Test dropping thresholds with repeating scores y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1.0, 0.9, 0.7, 0.6, 0.]) def test_auc(): # Test Area Under Curve (AUC) computation x = [0, 1] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0, 0] y = [0, 1, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [0, 1] y = [1, 1] assert_array_almost_equal(auc(x, y), 1) x = [0, 0.5, 1] y = [0, 0.5, 1] assert_array_almost_equal(auc(x, y), 0.5) def test_auc_duplicate_values(): # Test Area Under Curve (AUC) computation with duplicate values # auc() was 
previously sorting the x and y arrays according to the indices # from numpy.argsort(x), which was reordering the tied 0's in this example # and resulting in an incorrect area computation. This test detects the # error. x = [-2.0, 0.0, 0.0, 0.0, 1.0] y1 = [2.0, 0.0, 0.5, 1.0, 1.0] y2 = [2.0, 1.0, 0.0, 0.5, 1.0] y3 = [2.0, 1.0, 0.5, 0.0, 1.0] for y in (y1, y2, y3): assert_array_almost_equal(auc(x, y, reorder=True), 3.0) def test_auc_errors(): # Incompatible shapes assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2]) # Too few x values assert_raises(ValueError, auc, [0.0], [0.1]) # x is not in order assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0]) def test_auc_score_non_binary_class(): # Test that roc_auc_score function returns an error when trying # to compute AUC for non-binary class values. rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) clean_warning_registry() with warnings.catch_warnings(record=True): rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) def test_precision_recall_curve(): y_true, _, probas_pred = make_prediction(binary=True) _test_precision_recall_curve(y_true, probas_pred) # Use {-1, 1} for labels; make sure original labels aren't modified y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() _test_precision_recall_curve(y_true, probas_pred) assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] p, r, t = precision_recall_curve(labels, predict_probas) assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.])) assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.])) assert_array_almost_equal(t, np.array([1, 2, 3, 4])) assert_equal(p.size, r.size) assert_equal(p.size, t.size + 1) def test_precision_recall_curve_pos_label(): y_true, _, probas_pred = make_prediction(binary=False) pos_label = 2 p, r, thresholds = precision_recall_curve(y_true, probas_pred[:, pos_label], pos_label=pos_label) p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label, probas_pred[:, pos_label]) assert_array_almost_equal(p, p2) assert_array_almost_equal(r, r2) assert_array_almost_equal(thresholds, thresholds2) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def _test_precision_recall_curve(y_true, probas_pred): # Test Precision-Recall and aread under PR curve p, r, 
thresholds = precision_recall_curve(y_true, probas_pred) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.85, 2) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, probas_pred)) assert_almost_equal(_average_precision(y_true, probas_pred), precision_recall_auc, 1) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) # Smoke test in the case of proba having only one value p, r, thresholds = precision_recall_curve(y_true, np.zeros_like(probas_pred)) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.75, 3) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def test_precision_recall_curve_errors(): # Contains non-binary labels assert_raises(ValueError, precision_recall_curve, [0, 1, 2], [[0.0], [1.0], [1.0]]) def test_precision_recall_curve_toydata(): with np.errstate(all="raise"): # Binary classification y_true = [0, 1] y_score = [0, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [0, 1] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 0., 1.]) assert_array_almost_equal(r, [1., 0., 0.]) assert_almost_equal(auc_prc, 0.25) y_true = [1, 0] y_score = [1, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1., 0]) assert_almost_equal(auc_prc, .75) y_true = [1, 0] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [1, 0] y_score = [0.5, 0.5] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1, 0.]) assert_almost_equal(auc_prc, .75) y_true = [0, 0] y_score = [0.25, 0.75] assert_raises(Exception, precision_recall_curve, y_true, y_score) assert_raises(Exception, average_precision_score, y_true, y_score) y_true = [1, 1] y_score = [0.25, 0.75] p, r, _ = precision_recall_curve(y_true, y_score) assert_almost_equal(average_precision_score(y_true, y_score), 1.) assert_array_almost_equal(p, [1., 1., 1.]) assert_array_almost_equal(r, [1, 0.5, 0.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 1.) 
y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.625) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.625) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.25) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.75) def test_score_scale_invariance(): # Test that average_precision_score and roc_auc_score are invariant by # the scaling or shifting of probabilities y_true, _, probas_pred = make_prediction(binary=True) roc_auc = roc_auc_score(y_true, probas_pred) roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred) roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10) assert_equal(roc_auc, roc_auc_scaled) assert_equal(roc_auc, roc_auc_shifted) pr_auc = average_precision_score(y_true, probas_pred) pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred) pr_auc_shifted = average_precision_score(y_true, probas_pred - 10) assert_equal(pr_auc, pr_auc_scaled) assert_equal(pr_auc, pr_auc_shifted) def check_lrap_toy(lrap_score): # Check on several small example that it works assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1) assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]), (2 / 3 + 1 / 1) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[1, 0, 1]], 
[[0.5, 0.75, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1) # Tie handling assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3) assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]), 3 / 4) def check_zero_or_all_relevant_labels(lrap_score): random_state = check_random_state(0) for n_labels in range(2, 5): y_score = random_state.uniform(size=(1, n_labels)) y_score_ties = np.zeros_like(y_score) # No relevant labels y_true = np.zeros((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Only relevant labels y_true = np.ones((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Degenerate case: only one label assert_almost_equal(lrap_score([[1], [0], [1], [0]], [[0.5], [0.5], [0.5], [0.5]]), 1.) def check_lrap_error_raised(lrap_score): # Raise value error if not appropriate format assert_raises(ValueError, lrap_score, [0, 1, 0], [0.25, 0.3, 0.2]) assert_raises(ValueError, lrap_score, [0, 1, 2], [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]) assert_raises(ValueError, lrap_score, [(0), (1), (2)], [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]) # Check that y_true.shape != y_score.shape raise the proper exception assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]]) assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]]) assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]]) def check_lrap_only_ties(lrap_score): # Check tie handling in score # Basic check with only ties and increasing label space for n_labels in range(2, 10): y_score = np.ones((1, n_labels)) # Check for growing number of consecutive relevant for n_relevant in range(1, n_labels): # Check for a bunch of positions for pos in range(n_labels - n_relevant): y_true = np.zeros((1, n_labels)) y_true[0, pos:pos + n_relevant] = 1 assert_almost_equal(lrap_score(y_true, y_score), n_relevant / n_labels) def check_lrap_without_tie_and_increasing_score(lrap_score): # Check that Label ranking average precision works for various # Basic check with increasing label space size and decreasing score for n_labels in range(2, 10): y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1) # First and last y_true = np.zeros((1, n_labels)) y_true[0, 0] = 1 y_true[0, -1] = 1 assert_almost_equal(lrap_score(y_true, y_score), (2 / n_labels + 1) / 2) # Check for growing number of consecutive relevant label for n_relevant in range(1, n_labels): # 
Check for a bunch of position for pos in range(n_labels - n_relevant): y_true = np.zeros((1, n_labels)) y_true[0, pos:pos + n_relevant] = 1 assert_almost_equal(lrap_score(y_true, y_score), sum((r + 1) / ((pos + r + 1) * n_relevant) for r in range(n_relevant))) def _my_lrap(y_true, y_score): """Simple implementation of label ranking average precision""" check_consistent_length(y_true, y_score) y_true = check_array(y_true) y_score = check_array(y_score) n_samples, n_labels = y_true.shape score = np.empty((n_samples, )) for i in range(n_samples): # The best rank correspond to 1. Rank higher than 1 are worse. # The best inverse ranking correspond to n_labels. unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True) n_ranks = unique_rank.size rank = n_ranks - inv_rank # Rank need to be corrected to take into account ties # ex: rank 1 ex aequo means that both label are rank 2. corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum() rank = corr_rank[rank] relevant = y_true[i].nonzero()[0] if relevant.size == 0 or relevant.size == n_labels: score[i] = 1 continue score[i] = 0. for label in relevant: # Let's count the number of relevant label with better rank # (smaller rank). n_ranked_above = sum(rank[r] <= rank[label] for r in relevant) # Weight by the rank of the actual label score[i] += n_ranked_above / rank[label] score[i] /= relevant.size return score.mean() def check_alternative_lrap_implementation(lrap_score, n_classes=5, n_samples=20, random_state=0): _, y_true = make_multilabel_classification(n_features=1, allow_unlabeled=False, random_state=random_state, n_classes=n_classes, n_samples=n_samples) # Score with ties y_score = sparse_random_matrix(n_components=y_true.shape[0], n_features=y_true.shape[1], random_state=random_state) if hasattr(y_score, "toarray"): y_score = y_score.toarray() score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) # Uniform score random_state = check_random_state(random_state) y_score = random_state.uniform(size=(n_samples, n_classes)) score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) def test_label_ranking_avp(): for fn in [label_ranking_average_precision_score, _my_lrap]: yield check_lrap_toy, fn yield check_lrap_without_tie_and_increasing_score, fn yield check_lrap_only_ties, fn yield check_zero_or_all_relevant_labels, fn yield check_lrap_error_raised, label_ranking_average_precision_score for n_samples, n_classes, random_state in product((1, 2, 8, 20), (2, 5, 10), range(1)): yield (check_alternative_lrap_implementation, label_ranking_average_precision_score, n_classes, n_samples, random_state) def test_coverage_error(): # Toy case assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3) 
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3) # Non trival case assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]], [[0.1, 10., -3], [0, 1, 3]]), (1 + 3) / 2.) assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]), (1 + 3 + 3) / 3.) assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]), (1 + 3 + 3) / 3.) def test_coverage_tie_handling(): assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0) assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3) def test_label_ranking_loss(): assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1) assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 2 / 2) assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 2 / 2) # Undefined metrics - the ranking doesn't matter assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0) 
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0) # Non trival case assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]], [[0.1, 10., -3], [0, 1, 3]]), (0 + 2 / 2) / 2.) assert_almost_equal(label_ranking_loss( [[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]), (0 + 2 / 2 + 1 / 2) / 3.) assert_almost_equal(label_ranking_loss( [[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]), (0 + 2 / 2 + 1 / 2) / 3.) # Sparse csr matrices assert_almost_equal(label_ranking_loss( csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])), [[0.1, 10, -3], [3, 1, 3]]), (0 + 2 / 2) / 2.) def test_ranking_appropriate_input_shape(): # Check that y_true.shape != y_score.shape raise the proper exception assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0], [1]], [[0, 1], [0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]]) def test_ranking_loss_ties_handling(): # Tie handling assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
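# Illustrative sketch, separate from the test file above: the pairwise-counting
# view of ROC AUC that the _auc helper checks roc_auc_score against. The name
# pairwise_auc and the toy arrays are assumptions made for this example; ties,
# which would earn 0.5 credit each, are ignored here.
import numpy as np
from sklearn.metrics import roc_auc_score

def pairwise_auc(y_true, y_score):
    # Fraction of (positive, negative) pairs where the positive sample scores higher.
    pos = y_score[y_true == 1]
    neg = y_score[y_true == 0]
    return np.mean((pos.reshape(1, -1) - neg.reshape(-1, 1)) > 0)

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
assert np.isclose(pairwise_auc(y_true, y_score), roc_auc_score(y_true, y_score))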
bsd-3-clause
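# Illustrative sketch, separate from the dataset records: the rank-then-average
# rule that the _my_lrap helper above implements, for a single sample and with
# ties ignored. lrap_single and the toy inputs are names chosen here, not taken
# from the original code.
import numpy as np
from sklearn.metrics import label_ranking_average_precision_score

def lrap_single(y_true, y_score):
    y_true = np.asarray(y_true)
    y_score = np.asarray(y_score)
    rank = np.argsort(np.argsort(-y_score)) + 1  # rank 1 is the best score
    relevant = np.flatnonzero(y_true)
    # For each relevant label: relevant labels ranked at least as high / its rank.
    return np.mean([np.sum(rank[relevant] <= rank[l]) / float(rank[l])
                    for l in relevant])

assert np.isclose(
    lrap_single([1, 0, 1], [0.25, 0.5, 0.75]),
    label_ranking_average_precision_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]))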
lucidfrontier45/scikit-learn
sklearn/svm/tests/test_svm.py
2
20512
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import warnings import numpy as np from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_almost_equal) from scipy import sparse from nose.tools import assert_raises, assert_true, assert_equal, assert_false from sklearn import svm, linear_model, datasets, metrics, base from sklearn.datasets.samples_generator import make_classification from sklearn.metrics import f1_score from sklearn.utils import check_random_state from sklearn.utils import ConvergenceWarning from sklearn.utils.testing import assert_greater, assert_less # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): """ Test parameters on classes that make use of libsvm. """ clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[0.25, -.25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): """ Check consistency on dataset iris. """ # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) # check deprecated ``label_`` attribute: with warnings.catch_warnings(record=True): # catch deprecation warning assert_array_equal(clf.label_, np.sort(clf.label_)) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) def test_single_sample_1d(): """ Test whether SVCs work on a single sample given as a 1-d array """ clf = svm.SVC().fit(X, Y) clf.predict(X[0]) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf.predict(X[0]) def test_precomputed(): """ SVC with a precomputed kernel. We test it with a toy dataset and with iris. """ clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[0.25, -.25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. 
kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[0.25, -.25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): """ Test Support Vector Regression """ diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.)): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): """ Test OneClassSVM """ clf = svm.OneClassSVM() clf.fit(X) pred = clf.predict(T) assert_array_almost_equal(pred, [-1, -1, -1]) assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]], decimal=3) assert_raises(ValueError, lambda: clf.coef_) def test_oneclass_decision_function(): """ Test OneClassSVM decision function """ clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_tweak_params(): """ Make sure some tweaking of parameters works. We change clf.dual_coef_ at run time and expect .predict() to change accordingly. Notice that this is not trivial since it involves a lot of C/Python copying in the libsvm bindings. The success of this test ensures that the mapping between libsvm and the python classifier is complete. 
""" clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[.25, -.25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf.dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): """ Predict probabilities using SVC This uses cross validation, so we use a slightly bigger testing set. """ for clf in (svm.SVC(probability=True, C=1.0), svm.NuSVC(probability=True)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): """ Test decision_function Sanity check, test that decision_function implemented in python returns the same as the one in libsvm """ # multi class: clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec, clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()]) expected = np.array([[-1.], [-0.66], [-1.], [0.66], [1.], [1.]]) assert_array_almost_equal(clf.decision_function(X), expected, 2) def test_weight(): """ Test class weights """ clf = svm.SVC(class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): """ Test weights on individual samples """ # TODO: check on NuSVR, OneClass, etc. clf = svm.SVC() clf.fit(X, Y) assert_array_equal(clf.predict(X[2]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict(X[2]), [2.]) def test_auto_weight(): """Test class weights for imbalanced data""" from sklearn.linear_model import LogisticRegression # we take as dataset a the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1 from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target unbalanced = np.delete(np.arange(y.size), np.where(y > 1)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('auto', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='auto' is set. 
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='auto') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred) <= metrics.f1_score(y, y_pred_balanced)) def test_bad_input(): """ Test that it gives proper exception on deficient input """ # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC() assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC().fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC() clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_linearsvc_parameters(): """ Test possible parameter combinations in LinearSVC """ # generate list of possible parameter combinations params = [(dual, loss, penalty) for dual in [True, False] for loss in ['l1', 'l2', 'lr'] for penalty in ['l1', 'l2']] for dual, loss, penalty in params: if loss == 'l1' and penalty == 'l1': assert_raises(ValueError, svm.LinearSVC, penalty=penalty, loss=loss, dual=dual) elif loss == 'l1' and penalty == 'l2' and not dual: assert_raises(ValueError, svm.LinearSVC, penalty=penalty, loss=loss, dual=dual) elif penalty == 'l1' and dual: assert_raises(ValueError, svm.LinearSVC, penalty=penalty, loss=loss, dual=dual) else: svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) def test_linearsvc(): """ Test basic routines using LinearSVC """ clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='l1', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): """Test LinearSVC with crammer_singer multi-class svm""" ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # 
test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_linearsvc_iris(): """ Test that LinearSVC gives plausible predictions on the iris dataset Also, test symbolic class names (classes_). """ target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): """ Test that dense liblinear honours intercept_scaling param """ X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='l2', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): """Check that primal coef modification are not silently ignored""" svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_inheritance(): # check that SVC classes can do inheritance class ChildSVC(svm.SVC): def __init__(self, foo=0): self.foo = foo svm.SVC.__init__(self) clf = ChildSVC() clf.fit(iris.data, iris.target) clf.predict(iris.data[-1]) clf.decision_function(iris.data[-1]) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True) b = 
base.clone(a) b.fit(X, Y) b.predict(X) b.predict_proba(X) b.decision_function(X) def test_svc_bad_kernel(): svc = svm.SVC(kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, max_iter=1) with warnings.catch_warnings(record=True) as foo: # Hackish way to reset the warning counter from sklearn.svm import base base.__warningregistry__ = {} warnings.simplefilter("always") a.fit(X, Y) assert_equal(len(foo), 1, msg=foo) assert_equal(foo[0].category, ConvergenceWarning, msg=foo[0].category) if __name__ == '__main__': import nose nose.runmodule()
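# Illustrative sketch, separate from the test file above: the Gram-matrix
# contract that test_precomputed exercises. With kernel='precomputed', fit takes
# the square train-vs-train kernel and predict takes a rectangular test-vs-train
# kernel. The toy X/Y/T values mirror the fixtures used above.
import numpy as np
from sklearn import svm

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])

clf = svm.SVC(kernel='precomputed').fit(np.dot(X, X.T), Y)
pred = clf.predict(np.dot(T, X.T))           # rows: test samples, columns: train samples
linear = svm.SVC(kernel='linear').fit(X, Y)  # the equivalent ordinary linear SVC
assert list(pred) == list(linear.predict(T))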
bsd-3-clause
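# Illustrative sketch, separate from the records around it: the identity that
# test_decision_function checks in the binary case, namely that a linear-kernel
# SVC's decision values are an affine function of the inputs. The toy data are
# an assumption mirroring the X/Y fixture used in the tests above.
import numpy as np
from sklearn import svm

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]

clf = svm.SVC(kernel='linear').fit(X, Y)
manual = np.dot(X, clf.coef_.T) + clf.intercept_
assert np.allclose(manual.ravel(), np.ravel(clf.decision_function(X)))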
alexeyum/scikit-learn
examples/cluster/plot_kmeans_silhouette_analysis.py
82
5888
""" =============================================================================== Selecting the number of clusters with silhouette analysis on KMeans clustering =============================================================================== Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1]. Silhouette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. In this example the silhouette analysis is used to choose an optimal value for ``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4. Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the ``n_clusters`` is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. """ from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np print(__doc__) # Generating the sample data from make_blobs # This particular setting has one distinct cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)

    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples

    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")

    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")

    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])

    # 2nd Plot showing the actual clusters formed
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)

    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)

    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)

    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")

    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')

plt.show()
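# Illustrative sketch (not part of the example above): the numerical core of the
# silhouette-based choice of n_clusters, without the plotting. The variable
# names and the smaller toy blob setup are assumptions made here, and the
# highest average silhouette score is only a heuristic for picking n_clusters.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X_toy, _ = make_blobs(n_samples=300, centers=4, cluster_std=1.0, random_state=1)
scores = {}
for k in (2, 3, 4, 5, 6):
    labels = KMeans(n_clusters=k, random_state=10).fit_predict(X_toy)
    scores[k] = silhouette_score(X_toy, labels)
print("average silhouette score per n_clusters:", scores)
print("best n_clusters by this heuristic:", max(scores, key=scores.get))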
bsd-3-clause
DSLituiev/scikit-learn
examples/text/document_classification_20newsgroups.py
36
10499
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features and demonstrates various classifiers that can efficiently handle sparse matrices. The dataset used in this example is the 20 newsgroups dataset. It will be automatically downloaded, then cached. The bar plot indicates the accuracy, training time (normalized) and test time (normalized) of each classifier. """ # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # License: BSD 3 clause from __future__ import print_function import logging import numpy as np from optparse import OptionParser import sys from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import RidgeClassifier from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.linear_model import Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import NearestCentroid from sklearn.ensemble import RandomForestClassifier from sklearn.utils.extmath import density from sklearn import metrics # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--report", action="store_true", dest="print_report", help="Print a detailed classification report.") op.add_option("--chi2_select", action="store", type="int", dest="select_chi2", help="Select some number of features using a chi-squared test") op.add_option("--confusion_matrix", action="store_true", dest="print_cm", help="Print the confusion matrix.") op.add_option("--top10", action="store_true", dest="print_top10", help="Print ten most discriminative terms per class" " for every classifier.") op.add_option("--all_categories", action="store_true", dest="all_categories", help="Whether to use all categories or not.") op.add_option("--use_hashing", action="store_true", help="Use a hashing vectorizer.") op.add_option("--n_features", action="store", type=int, default=2 ** 16, help="n_features when using the hashing vectorizer.") op.add_option("--filtered", action="store_true", help="Remove newsgroup information that is easily overfit: " "headers, signatures, and quoting.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) print(__doc__) op.print_help() print() ############################################################################### # Load some categories from the training set if opts.all_categories: categories = None else: categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] if opts.filtered: remove = ('headers', 'footers', 'quotes') else: remove = () print("Loading 20 newsgroups dataset for categories:") print(categories if categories else "all") data_train = 
fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42, remove=remove) print('data loaded') categories = data_train.target_names # for case categories == None def size_mb(docs): return sum(len(s.encode('utf-8')) for s in docs) / 1e6 data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print("%d documents - %0.3fMB (training set)" % ( len(data_train.data), data_train_size_mb)) print("%d documents - %0.3fMB (test set)" % ( len(data_test.data), data_test_size_mb)) print("%d categories" % len(categories)) print() # split a training set and a test set y_train, y_test = data_train.target, data_test.target print("Extracting features from the training data using a sparse vectorizer") t0 = time() if opts.use_hashing: vectorizer = HashingVectorizer(stop_words='english', non_negative=True, n_features=opts.n_features) X_train = vectorizer.transform(data_train.data) else: vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') X_train = vectorizer.fit_transform(data_train.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_train.shape) print() print("Extracting features from the test data using the same vectorizer") t0 = time() X_test = vectorizer.transform(data_test.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_test.shape) print() # mapping from integer feature name to original token string if opts.use_hashing: feature_names = None else: feature_names = vectorizer.get_feature_names() if opts.select_chi2: print("Extracting %d best features by a chi-squared test" % opts.select_chi2) t0 = time() ch2 = SelectKBest(chi2, k=opts.select_chi2) X_train = ch2.fit_transform(X_train, y_train) X_test = ch2.transform(X_test) if feature_names: # keep selected feature names feature_names = [feature_names[i] for i in ch2.get_support(indices=True)] print("done in %fs" % (time() - t0)) print() if feature_names: feature_names = np.asarray(feature_names) def trim(s): """Trim string to fit on terminal (assuming 80-column display)""" return s if len(s) <= 80 else s[:77] + "..." 
############################################################################### # Benchmark classifiers def benchmark(clf): print('_' * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print("train time: %0.3fs" % train_time) t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print("test time: %0.3fs" % test_time) score = metrics.accuracy_score(y_test, pred) print("accuracy: %0.3f" % score) if hasattr(clf, 'coef_'): print("dimensionality: %d" % clf.coef_.shape[1]) print("density: %f" % density(clf.coef_)) if opts.print_top10 and feature_names is not None: print("top 10 keywords per class:") for i, category in enumerate(categories): top10 = np.argsort(clf.coef_[i])[-10:] print(trim("%s: %s" % (category, " ".join(feature_names[top10])))) print() if opts.print_report: print("classification report:") print(metrics.classification_report(y_test, pred, target_names=categories)) if opts.print_cm: print("confusion matrix:") print(metrics.confusion_matrix(y_test, pred)) print() clf_descr = str(clf).split('(')[0] return clf_descr, score, train_time, test_time results = [] for clf, name in ( (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"), (Perceptron(n_iter=50), "Perceptron"), (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"), (KNeighborsClassifier(n_neighbors=10), "kNN"), (RandomForestClassifier(n_estimators=100), "Random forest")): print('=' * 80) print(name) results.append(benchmark(clf)) for penalty in ["l2", "l1"]: print('=' * 80) print("%s penalty" % penalty.upper()) # Train Liblinear model results.append(benchmark(LinearSVC(loss='l2', penalty=penalty, dual=False, tol=1e-3))) # Train SGD model results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty))) # Train SGD with Elastic Net penalty print('=' * 80) print("Elastic-Net penalty") results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"))) # Train NearestCentroid without threshold print('=' * 80) print("NearestCentroid (aka Rocchio classifier)") results.append(benchmark(NearestCentroid())) # Train sparse Naive Bayes classifiers print('=' * 80) print("Naive Bayes") results.append(benchmark(MultinomialNB(alpha=.01))) results.append(benchmark(BernoulliNB(alpha=.01))) print('=' * 80) print("LinearSVC with L1-based feature selection") # The smaller C, the stronger the regularization. # The more regularization, the more sparsity. results.append(benchmark(Pipeline([ ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)), ('classification', LinearSVC()) ]))) # make some plots indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) / np.max(training_time) test_time = np.array(test_time) / np.max(test_time) plt.figure(figsize=(12, 8)) plt.title("Score") plt.barh(indices, score, .2, label="score", color='navy') plt.barh(indices + .3, training_time, .2, label="training time", color='c') plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange') plt.yticks(()) plt.legend(loc='best') plt.subplots_adjust(left=.25) plt.subplots_adjust(top=.95) plt.subplots_adjust(bottom=.05) for i, c in zip(indices, clf_names): plt.text(-.3, i, c) plt.show()
bsd-3-clause
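The benchmark script above targets an older scikit-learn API: `Perceptron(n_iter=50)`, `SGDClassifier(n_iter=50)`, `LinearSVC(loss='l2')` and `HashingVectorizer(non_negative=True)` use parameter spellings that were later deprecated (`n_iter` became `max_iter`, `loss='l2'` became `loss='squared_hinge'`). Below is a minimal, self-contained sketch of the same vectorize/train/evaluate loop using only the stable parts of the API; the two-category subset and the single `LinearSVC` benchmark are illustrative choices, not part of the original script.

```python
from time import time

from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC

# Illustrative two-category subset (not from the original script) to keep the run short.
categories = ['sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)

# Vectorize once; every benchmarked classifier reuses the same matrices.
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(train.data)
X_test = vectorizer.transform(test.data)

# Same train/predict/score pattern as benchmark() above, with the newer
# loss='squared_hinge' spelling instead of the deprecated loss='l2'.
clf = LinearSVC(loss='squared_hinge', penalty='l2', dual=False, tol=1e-3)
t0 = time()
clf.fit(X_train, train.target)
print("train time: %0.3fs" % (time() - t0))
pred = clf.predict(X_test)
print("accuracy: %0.3f" % metrics.accuracy_score(test.target, pred))
```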
BiaDarkia/scikit-learn
sklearn/cluster/tests/test_dbscan.py
55
13916
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from scipy.spatial import distance from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_not_in from sklearn.neighbors import NearestNeighbors from sklearn.cluster.dbscan_ import DBSCAN from sklearn.cluster.dbscan_ import dbscan from sklearn.cluster.tests.common import generate_clustered_data from sklearn.metrics.pairwise import pairwise_distances n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): # Tests the DBSCAN algorithm with a similarity array. # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): # Tests the DBSCAN algorithm with a feature vector array. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_sparse(): core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8, min_samples=10) core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10) assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_sparse_precomputed(): D = pairwise_distances(X) nn = NearestNeighbors(radius=.9).fit(X) D_sparse = nn.radius_neighbors_graph(mode='distance') # Ensure it is sparse not merely on diagonals: assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1) core_sparse, labels_sparse = dbscan(D_sparse, eps=.8, min_samples=10, metric='precomputed') core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10, metric='precomputed') assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_no_core_samples(): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 for X_ in [X, sparse.csr_matrix(X)]: db = DBSCAN(min_samples=6).fit(X_) assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) assert_array_equal(db.labels_, -1) assert_equal(db.core_sample_indices_.shape, (0,)) def test_dbscan_callable(): # Tests the DBSCAN algorithm with a callable metric. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. 
metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_metric_params(): # Tests that DBSCAN works with the metric_params argument. eps = 0.8 min_samples = 10 p = 1 # Compute DBSCAN with metric_params arg db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps, min_samples=min_samples, algorithm='ball_tree').fit(X) core_sample_1, labels_1 = db.core_sample_indices_, db.labels_ # Test that sample labels are the same as passing Minkowski 'p' directly db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples, algorithm='ball_tree', p=p).fit(X) core_sample_2, labels_2 = db.core_sample_indices_, db.labels_ assert_array_equal(core_sample_1, core_sample_2) assert_array_equal(labels_1, labels_2) # Minkowski with p=1 should be equivalent to Manhattan distance db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples, algorithm='ball_tree').fit(X) core_sample_3, labels_3 = db.core_sample_indices_, db.labels_ assert_array_equal(core_sample_1, core_sample_3) assert_array_equal(labels_1, labels_3) def test_dbscan_balltree(): # Tests the DBSCAN algorithm with balltree for neighbor calculation. eps = 0.8 min_samples = 10 D = pairwise_distances(X) core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree') labels = db.fit(X).labels_ n_clusters_3 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_3, n_clusters) db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_4 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_4, n_clusters) db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_5 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_5, n_clusters) def test_input_validation(): # DBSCAN.fit should accept a list of lists. 
X = [[1., 2.], [3., 4.]] DBSCAN().fit(X) # must not raise exception def test_dbscan_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, dbscan, X, eps=-1.0) assert_raises(ValueError, dbscan, X, algorithm='blah') assert_raises(ValueError, dbscan, X, metric='blah') assert_raises(ValueError, dbscan, X, leaf_size=-1) assert_raises(ValueError, dbscan, X, p=-1) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__) def test_boundaries(): # ensure min_samples is inclusive of core point core, _ = dbscan([[0], [1]], eps=2, min_samples=2) assert_in(0, core) # ensure eps is inclusive of circumference core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) assert_in(0, core) core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2) assert_not_in(0, core) def test_weighted_dbscan(): # ensure sample_weight is validated assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2]) assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4]) # ensure sample_weight has an effect assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]) # points within eps of each other: assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]) # and effect of non-positive and non-integer sample_weight: assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]) # for non-negative sample_weight, cores should be identical to repetition rng = np.random.RandomState(42) sample_weight = rng.randint(0, 5, X.shape[0]) core1, label1 = dbscan(X, sample_weight=sample_weight) assert_equal(len(label1), len(X)) X_repeated = np.repeat(X, sample_weight, axis=0) core_repeated, label_repeated = dbscan(X_repeated) core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) core_repeated_mask[core_repeated] = True core_mask = np.zeros(X.shape[0], dtype=bool) core_mask[core1] = True assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) # sample_weight should work with precomputed distance matrix D = pairwise_distances(X) core3, label3 = dbscan(D, sample_weight=sample_weight, metric='precomputed') assert_array_equal(core1, core3) assert_array_equal(label1, label3) # sample_weight should work with estimator est = DBSCAN().fit(X, sample_weight=sample_weight) core4 = est.core_sample_indices_ label4 = est.labels_ assert_array_equal(core1, core4) assert_array_equal(label1, label4) est = DBSCAN() label5 = est.fit_predict(X, sample_weight=sample_weight) core5 = est.core_sample_indices_ assert_array_equal(core1, core5) assert_array_equal(label1, label5) assert_array_equal(label1, est.labels_) def test_dbscan_core_samples_toy(): X = [[0], [2], [3], [4], [6], [8], [10]] n_samples = len(X) for algorithm in ['brute', 'kd_tree', 'ball_tree']: # Degenerate case: every sample is a core sample, either with its own # cluster or including other close core samples. 
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) assert_array_equal(core_samples, np.arange(n_samples)) assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) # With eps=1 and min_samples=2 only the 3 samples from the denser area # are core samples. All other points are isolated and considered noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) assert_array_equal(core_samples, [1, 2, 3]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # Only the sample in the middle of the dense area is core. Its two # neighbors are edge samples. Remaining samples are noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) assert_array_equal(core_samples, [2]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # It's no longer possible to extract core samples with eps=1: # everything is noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) assert_array_equal(core_samples, []) assert_array_equal(labels, -np.ones(n_samples)) def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): # see https://github.com/scikit-learn/scikit-learn/issues/4641 for # more details X = np.eye(10) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) X = np.zeros((10, 10)) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) def test_dbscan_precomputed_metric_with_initial_rows_zero(): # sample matrix with the first two rows all zero ar = np.array([ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0], [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1], [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0] ]) matrix = sparse.csr_matrix(ar) labels = DBSCAN(eps=0.2, metric='precomputed', min_samples=2).fit(matrix).labels_ assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
bsd-3-clause
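The tests above exercise both public entry points for DBSCAN. As a rough usage sketch (the toy data and parameter values are arbitrary, not taken from the tests): the `dbscan` function returns `(core_sample_indices, labels)`, the `DBSCAN` estimator exposes the same result through `fit()`, and both accept `metric='precomputed'` with a distance matrix.

```python
import numpy as np
from sklearn.cluster import DBSCAN, dbscan
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances

X, _ = make_blobs(n_samples=60, centers=3, random_state=0)

# Function form, as used throughout the tests above.
core, labels = dbscan(X, eps=1.0, min_samples=5)

# Estimator form gives the same labelling via fit()/labels_.
est = DBSCAN(eps=1.0, min_samples=5).fit(X)
assert np.array_equal(labels, est.labels_)

# A precomputed (dense or sparse) distance matrix can replace the raw features.
D = pairwise_distances(X)
_, labels_pre = dbscan(D, metric='precomputed', eps=1.0, min_samples=5)
assert np.array_equal(labels, labels_pre)
```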
ClimbsRocks/scikit-learn
sklearn/covariance/robust_covariance.py
103
29653
""" Robust location and covariance estimators. Here are implemented estimators that are resistant to outliers. """ # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import warnings import numbers import numpy as np from scipy import linalg from scipy.stats import chi2 from . import empirical_covariance, EmpiricalCovariance from ..utils.extmath import fast_logdet, pinvh from ..utils import check_random_state, check_array # Minimum Covariance Determinant # Implementing of an algorithm by Rousseeuw & Van Driessen described in # (A Fast Algorithm for the Minimum Covariance Determinant Estimator, # 1999, American Statistical Association and the American Society # for Quality, TECHNOMETRICS) # XXX Is this really a public function? It's not listed in the docs or # exported by sklearn.covariance. Deprecate? def c_step(X, n_support, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. Parameters ---------- X : array-like, shape (n_samples, n_features) Data set in which we look for the n_support observations whose scatter matrix has minimum determinant. n_support : int, > n_samples / 2 Number of observations to compute the robust estimates of location and covariance from. remaining_iterations : int, optional Number of iterations to perform. According to [Rouseeuw1999]_, two iterations are sufficient to get close to the minimum, and we never need more than 30 to reach convergence. initial_estimates : 2-tuple, optional Initial estimates of location and shape from which to run the c_step procedure: - initial_estimates[0]: an initial location estimate - initial_estimates[1]: an initial covariance estimate verbose : boolean, optional Verbose mode. random_state : integer or numpy.RandomState, optional The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) Returns ------- location : array-like, shape (n_features,) Robust location estimates. covariance : array-like, shape (n_features, n_features) Robust covariance estimates. support : array-like, shape (n_samples,) A mask for the `n_support` observations whose scatter matrix has minimum determinant. References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ X = np.asarray(X) random_state = check_random_state(random_state) return _c_step(X, n_support, remaining_iterations=remaining_iterations, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state) def _c_step(X, n_support, random_state, remaining_iterations=30, initial_estimates=None, verbose=False, cov_computation_method=empirical_covariance): n_samples, n_features = X.shape # Initialisation support = np.zeros(n_samples, dtype=bool) if initial_estimates is None: # compute initial robust estimates from a random subset support[random_state.permutation(n_samples)[:n_support]] = True else: # get initial robust estimates from the function parameters location = initial_estimates[0] covariance = initial_estimates[1] # run a special iteration for that case (to get an initial support) precision = pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(1) # compute new estimates support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(0) covariance = cov_computation_method(X_support) # Iterative procedure for Minimum Covariance Determinant computation det = fast_logdet(covariance) previous_det = np.inf while (det < previous_det) and (remaining_iterations > 0): # save old estimates values previous_location = location previous_covariance = covariance previous_det = det previous_support = support # compute a new support from the full data set mahalanobis distances precision = pinvh(covariance) X_centered = X - location dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) # compute new estimates support = np.zeros(n_samples, dtype=bool) support[np.argsort(dist)[:n_support]] = True X_support = X[support] location = X_support.mean(axis=0) covariance = cov_computation_method(X_support) det = fast_logdet(covariance) # update remaining iterations for early stopping remaining_iterations -= 1 previous_dist = dist dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) # Catch computation errors if np.isinf(det): raise ValueError( "Singular covariance matrix. " "Please check that the covariance matrix corresponding " "to the dataset is full rank and that MinCovDet is used with " "Gaussian-distributed data (or at least data drawn from a " "unimodal, symmetric distribution.") # Check convergence if np.allclose(det, previous_det): # c_step procedure converged if verbose: print("Optimal couple (location, covariance) found before" " ending iterations (%d left)" % (remaining_iterations)) results = location, covariance, det, support, dist elif det > previous_det: # determinant has increased (should not happen) warnings.warn("Warning! det > previous_det (%.15f > %.15f)" % (det, previous_det), RuntimeWarning) results = previous_location, previous_covariance, \ previous_det, previous_support, previous_dist # Check early stopping if remaining_iterations == 0: if verbose: print('Maximum number of iterations reached') results = location, covariance, det, support, dist return results def select_candidates(X, n_support, n_trials, select=1, n_iter=30, verbose=False, cov_computation_method=empirical_covariance, random_state=None): """Finds the best pure subset of observations to compute MCD from it. 
The purpose of this function is to find the best sets of n_support observations with respect to a minimization of their covariance matrix determinant. Equivalently, it removes n_samples-n_support observations to construct what we call a pure data set (i.e. not containing outliers). The list of the observations of the pure data set is referred to as the `support`. Starting from a random support, the pure data set is found by the c_step procedure introduced by Rousseeuw and Van Driessen in [Rouseeuw1999]_. Parameters ---------- X : array-like, shape (n_samples, n_features) Data (sub)set in which we look for the n_support purest observations. n_support : int, [(n + p + 1)/2] < n_support < n The number of samples the pure data set must contain. select : int, int > 0 Number of best candidates results to return. n_trials : int, nb_trials > 0 or 2-tuple Number of different initial sets of observations from which to run the algorithm. Instead of giving a number of trials to perform, one can provide a list of initial estimates that will be used to iteratively run c_step procedures. In this case: - n_trials[0]: array-like, shape (n_trials, n_features) is the list of `n_trials` initial location estimates - n_trials[1]: array-like, shape (n_trials, n_features, n_features) is the list of `n_trials` initial covariances estimates n_iter : int, nb_iter > 0 Maximum number of iterations for the c_step procedure. (2 is enough to be close to the final solution. "Never" exceeds 20). random_state : integer or numpy.RandomState, default None The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) verbose : boolean, default False Control the output verbosity. See Also --------- c_step Returns ------- best_locations : array-like, shape (select, n_features) The `select` location estimates computed from the `select` best supports found in the data set (`X`). best_covariances : array-like, shape (select, n_features, n_features) The `select` covariance estimates computed from the `select` best supports found in the data set (`X`). best_supports : array-like, shape (select, n_samples) The `select` best supports found in the data set (`X`). References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS """ random_state = check_random_state(random_state) n_samples, n_features = X.shape if isinstance(n_trials, numbers.Integral): run_from_estimates = False elif isinstance(n_trials, tuple): run_from_estimates = True estimates_list = n_trials n_trials = estimates_list[0].shape[0] else: raise TypeError("Invalid 'n_trials' parameter, expected tuple or " " integer, got %s (%s)" % (n_trials, type(n_trials))) # compute `n_trials` location and shape estimates candidates in the subset all_estimates = [] if not run_from_estimates: # perform `n_trials` computations from random initial supports for j in range(n_trials): all_estimates.append( _c_step( X, n_support, remaining_iterations=n_iter, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) else: # perform computations from every given initial estimates for j in range(n_trials): initial_estimates = (estimates_list[0][j], estimates_list[1][j]) all_estimates.append(_c_step( X, n_support, remaining_iterations=n_iter, initial_estimates=initial_estimates, verbose=verbose, cov_computation_method=cov_computation_method, random_state=random_state)) all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \ zip(*all_estimates) # find the `n_best` best results among the `n_trials` ones index_best = np.argsort(all_dets_sub)[:select] best_locations = np.asarray(all_locs_sub)[index_best] best_covariances = np.asarray(all_covs_sub)[index_best] best_supports = np.asarray(all_supports_sub)[index_best] best_ds = np.asarray(all_ds_sub)[index_best] return best_locations, best_covariances, best_supports, best_ds def fast_mcd(X, support_fraction=None, cov_computation_method=empirical_covariance, random_state=None): """Estimates the Minimum Covariance Determinant matrix. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- X : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: `[n_sample + n_features + 1] / 2`. random_state : integer or numpy.RandomState, optional The generator used to randomly subsample. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. cov_computation_method : callable, default empirical_covariance The function which will be used to compute the covariance. Must return shape (n_features, n_features) Notes ----- The FastMCD algorithm has been introduced by Rousseuw and Van Driessen in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS". The principle is to compute robust estimates and random subsets before pooling them into a larger subsets, and finally into the full data set. Depending on the size of the initial sample, we have one, two or three such computation levels. Note that only raw estimates are returned. If one is interested in the correction and reweighting steps described in [Rouseeuw1999]_, see the MinCovDet object. References ---------- .. 
[Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 Returns ------- location : array-like, shape (n_features,) Robust location of the data. covariance : array-like, shape (n_features, n_features) Robust covariance of the features. support : array-like, type boolean, shape (n_samples,) A mask of the observations that have been used to compute the robust location and covariance estimates of the data set. """ random_state = check_random_state(random_state) X = check_array(X, ensure_min_samples=2, estimator='fast_mcd') n_samples, n_features = X.shape # minimum breakdown value if support_fraction is None: n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) else: n_support = int(support_fraction * n_samples) # 1-dimensional case quick computation # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust # Regression and Outlier Detection, John Wiley & Sons, chapter 4) if n_features == 1: if n_support < n_samples: # find the sample shortest halves X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)] halves_start = np.where(diff == np.min(diff))[0] # take the middle points' mean to get the robust location estimate location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) # Starting FastMCD algorithm for p-dimensional case if (n_samples > 500) and (n_features > 1): # 1. Find candidate supports on subsets # a. split the set in subsets of size ~ 300 n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) # b. perform a total of 500 trials n_trials_tot = 500 # c. select 10 best (location, covariance) for each subset n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: # The above is too big. 
Let's try with something much small # (and less optimal) all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_tot = 10 n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] best_locations_sub, best_covariances_sub, _, _ = select_candidates( current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub # 2. Pool the candidate supports into a merged set # (possibly the full dataset) n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 # find the best couples (location, covariance) on the merged set selection = random_state.permutation(n_samples)[:n_samples_merged] locations_merged, covariances_merged, supports_merged, d = \ select_candidates( X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=cov_computation_method, random_state=random_state) # 3. Finally get the overall best (locations, covariance) couple if n_samples < 1500: # directly get the best couple (location, covariance) location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: # select the best couple on the full dataset locations_full, covariances_full, supports_full, d = \ select_candidates( X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: # 1. Find the 10 best couples (location, covariance) # considering two iterations n_trials = 30 n_best = 10 locations_best, covariances_best, _, _ = select_candidates( X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) # 2. Select the best couple on the full dataset amongst the 10 locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] return location, covariance, support, dist class MinCovDet(EmpiricalCovariance): """Minimum Covariance Determinant (MCD): robust estimator of covariance. The Minimum Covariance Determinant covariance estimator is to be applied on Gaussian-distributed data, but could still be relevant on data drawn from a unimodal, symmetric distribution. It is not meant to be used with multi-modal data (the algorithm used to fit a MinCovDet object is likely to fail in such a case). One should consider projection pursuit methods to deal with multi-modal datasets. Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- store_precision : bool Specify if the estimated precision is stored. 
assume_centered : Boolean If True, the support of the robust location and the covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2 random_state : integer or numpy.RandomState, optional The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- raw_location_ : array-like, shape (n_features,) The raw robust estimated location before correction and re-weighting. raw_covariance_ : array-like, shape (n_features, n_features) The raw robust estimated covariance before correction and re-weighting. raw_support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the raw robust estimates of location and shape, before correction and re-weighting. location_ : array-like, shape (n_features,) Estimated robust location covariance_ : array-like, shape (n_features, n_features) Estimated robust covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. dist_ : array-like, shape (n_samples,) Mahalanobis distances of the training set (on which `fit` is called) observations. References ---------- .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984.` .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS` .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400` """ _nonrobust_covariance = staticmethod(empirical_covariance) def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, random_state=None): self.store_precision = store_precision self.assume_centered = assume_centered self.support_fraction = support_fraction self.random_state = random_state def fit(self, X, y=None): """Fits a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self : object Returns self. 
""" X = check_array(X, ensure_min_samples=2, estimator='MinCovDet') random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn("The covariance matrix associated to your dataset " "is not full rank") # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) # get precision matrix in an optimized way precision = pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self def correct_covariance(self, data): """Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [Rouseeuw1984]_. Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : array-like, shape (n_features, n_features) Corrected robust covariance estimate. """ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected def reweight_covariance(self, data): """Re-weight raw Minimum Covariance Determinant estimates. Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates). [Rouseeuw1984]_ Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : array-like, shape (n_features, ) Re-weighted robust location estimate. covariance_reweighted : array-like, shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : array-like, type boolean, shape (n_samples,) A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. """ n_samples, n_features = data.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = data[mask].mean(0) covariance_reweighted = self._nonrobust_covariance( data[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = data - self.location_ self.dist_ = np.sum( np.dot(X_centered, self.get_precision()) * X_centered, 1) return location_reweighted, covariance_reweighted, support_reweighted
bsd-3-clause
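This module pairs the low-level FastMCD routines (`c_step`, `select_candidates`, `fast_mcd`) with the `MinCovDet` estimator, which adds the correction and re-weighting steps on top of the raw fit. A hedged usage sketch follows; the contaminated toy data is invented for illustration and only meant to show how the robust estimate differs from the plain empirical covariance.

```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance, MinCovDet

rng = np.random.RandomState(0)
# 100 samples from a correlated Gaussian, with the first five turned into gross outliers.
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[1.0, 0.8], [0.8, 1.0]], size=100)
X[:5] += 10.0

mcd = MinCovDet(random_state=0).fit(X)  # raw FastMCD fit + correction + re-weighting
emp = EmpiricalCovariance().fit(X)      # classical (non-robust) estimate

print("robust location:", mcd.location_)           # stays near [0, 0]
print("robust covariance:\n", mcd.covariance_)     # close to the true covariance
print("empirical covariance:\n", emp.covariance_)  # inflated by the five outliers
print("support size:", mcd.support_.sum())         # samples retained in the support
```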
DSLituiev/scikit-learn
examples/hetero_feature_union.py
286
6236
""" ============================================= Feature Union with Heterogeneous Data Sources ============================================= Datasets can often contain components of that require different feature extraction and processing pipelines. This scenario might occur when: 1. Your dataset consists of heterogeneous data types (e.g. raster images and text captions) 2. Your dataset is stored in a Pandas DataFrame and different columns require different processing pipelines. This example demonstrates how to use :class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing different types of features. We use the 20-newsgroups dataset and compute standard bag-of-words features for the subject line and body in separate pipelines as well as ad hoc features on the body. We combine them (with weights) using a FeatureUnion and finally train a classifier on the combined set of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Author: Matt Terry <matt.terry@gmail.com> # # License: BSD 3 clause from __future__ import print_function import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_20newsgroups from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. >> len(data[key]) == n_samples Please note that this is the opposite convention to sklearn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, posts): return [{'length': len(text), 'num_sentences': text.count('.')} for text in posts] class SubjectBodyExtractor(BaseEstimator, TransformerMixin): """Extract the subject & body from a usenet post in a single pass. Takes a sequence of strings and produces a dict of sequences. Keys are `subject` and `body`. 
""" def fit(self, x, y=None): return self def transform(self, posts): features = np.recarray(shape=(len(posts),), dtype=[('subject', object), ('body', object)]) for i, text in enumerate(posts): headers, _, bod = text.partition('\n\n') bod = strip_newsgroup_footer(bod) bod = strip_newsgroup_quoting(bod) features['body'][i] = bod prefix = 'Subject:' sub = '' for line in headers.split('\n'): if line.startswith(prefix): sub = line[len(prefix):] break features['subject'][i] = sub return features pipeline = Pipeline([ # Extract the subject & body ('subjectbody', SubjectBodyExtractor()), # Use FeatureUnion to combine the features from subject and body ('union', FeatureUnion( transformer_list=[ # Pipeline for pulling features from the post's subject line ('subject', Pipeline([ ('selector', ItemSelector(key='subject')), ('tfidf', TfidfVectorizer(min_df=50)), ])), # Pipeline for standard bag-of-words model for body ('body_bow', Pipeline([ ('selector', ItemSelector(key='body')), ('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from post's body ('body_stats', Pipeline([ ('selector', ItemSelector(key='body')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights={ 'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0, }, )), # Use a SVC classifier on the combined features ('svc', SVC(kernel='linear')), ]) # limit the list of categories to make running this exmaple faster. categories = ['alt.atheism', 'talk.religion.misc'] train = fetch_20newsgroups(random_state=1, subset='train', categories=categories, ) test = fetch_20newsgroups(random_state=1, subset='test', categories=categories, ) pipeline.fit(train.data, train.target) y = pipeline.predict(test.data) print(classification_report(y, test.target))
bsd-3-clause
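The example relies on `ItemSelector` to route one key of a dict-of-sequences into each branch of the `FeatureUnion`. The sketch below re-implements that pattern in a self-contained way; `KeySelector`, the toy data and the labels are hypothetical stand-ins invented for illustration, not part of the example above.

```python
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import FeatureUnion, Pipeline


class KeySelector(BaseEstimator, TransformerMixin):
    """Hypothetical minimal stand-in for the ItemSelector defined above."""

    def __init__(self, key):
        self.key = key

    def fit(self, x, y=None):
        return self

    def transform(self, data_dict):
        # Pull one column (a sequence of samples) out of the dict-of-sequences.
        return data_dict[self.key]


# Toy dict-of-sequences input: one key per field, equal-length lists of samples.
data = {'title': ['spam offer', 'meeting notes', 'cheap pills', 'project plan'],
        'body': ['buy now', 'agenda attached', 'discount today', 'timeline below']}
y = np.array([1, 0, 1, 0])

union = FeatureUnion([
    ('title', Pipeline([('sel', KeySelector('title')), ('vec', TfidfVectorizer())])),
    ('body', Pipeline([('sel', KeySelector('body')), ('vec', CountVectorizer())])),
])

model = Pipeline([('features', union), ('clf', LogisticRegression())])
model.fit(data, y)
print(model.predict(data))
```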
untom/scikit-learn
sklearn/cluster/tests/test_birch.py
339
5603
""" Tests for the birch clustering algorithm. """ from scipy import sparse import numpy as np from sklearn.cluster.tests.common import generate_clustered_data from sklearn.cluster.birch import Birch from sklearn.cluster.hierarchical import AgglomerativeClustering from sklearn.datasets import make_blobs from sklearn.linear_model import ElasticNet from sklearn.metrics import pairwise_distances_argmin, v_measure_score from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns def test_n_samples_leaves_roots(): # Sanity check for the number of samples in leaves and roots X, y = make_blobs(n_samples=10) brc = Birch() brc.fit(X) n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_]) n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_]) assert_equal(n_samples_leaves, X.shape[0]) assert_equal(n_samples_root, X.shape[0]) def test_partial_fit(): # Test that fit is equivalent to calling partial_fit multiple times X, y = make_blobs(n_samples=100) brc = Birch(n_clusters=3) brc.fit(X) brc_partial = Birch(n_clusters=None) brc_partial.partial_fit(X[:50]) brc_partial.partial_fit(X[50:]) assert_array_equal(brc_partial.subcluster_centers_, brc.subcluster_centers_) # Test that same global labels are obtained after calling partial_fit # with None brc_partial.set_params(n_clusters=3) brc_partial.partial_fit(None) assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_) def test_birch_predict(): # Test the predict method predicts the nearest centroid. rng = np.random.RandomState(0) X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10) # n_samples * n_samples_per_cluster shuffle_indices = np.arange(30) rng.shuffle(shuffle_indices) X_shuffle = X[shuffle_indices, :] brc = Birch(n_clusters=4, threshold=1.) brc.fit(X_shuffle) centroids = brc.subcluster_centers_ assert_array_equal(brc.labels_, brc.predict(X_shuffle)) nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids) assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0) def test_n_clusters(): # Test that n_clusters param works properly X, y = make_blobs(n_samples=100, centers=10) brc1 = Birch(n_clusters=10) brc1.fit(X) assert_greater(len(brc1.subcluster_centers_), 10) assert_equal(len(np.unique(brc1.labels_)), 10) # Test that n_clusters = Agglomerative Clustering gives # the same results. gc = AgglomerativeClustering(n_clusters=10) brc2 = Birch(n_clusters=gc) brc2.fit(X) assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_) assert_array_equal(brc1.labels_, brc2.labels_) # Test that the wrong global clustering step raises an Error. clf = ElasticNet() brc3 = Birch(n_clusters=clf) assert_raises(ValueError, brc3.fit, X) # Test that a small number of clusters raises a warning. brc4 = Birch(threshold=10000.) 
assert_warns(UserWarning, brc4.fit, X) def test_sparse_X(): # Test that sparse and dense data give same results X, y = make_blobs(n_samples=100, centers=10) brc = Birch(n_clusters=10) brc.fit(X) csr = sparse.csr_matrix(X) brc_sparse = Birch(n_clusters=10) brc_sparse.fit(csr) assert_array_equal(brc.labels_, brc_sparse.labels_) assert_array_equal(brc.subcluster_centers_, brc_sparse.subcluster_centers_) def check_branching_factor(node, branching_factor): subclusters = node.subclusters_ assert_greater_equal(branching_factor, len(subclusters)) for cluster in subclusters: if cluster.child_: check_branching_factor(cluster.child_, branching_factor) def test_branching_factor(): # Test that nodes have at max branching_factor number of subclusters X, y = make_blobs() branching_factor = 9 # Purposefully set a low threshold to maximize the subclusters. brc = Birch(n_clusters=None, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) brc = Birch(n_clusters=3, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) # Raises error when branching_factor is set to one. brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01) assert_raises(ValueError, brc.fit, X) def check_threshold(birch_instance, threshold): """Use the leaf linked list for traversal""" current_leaf = birch_instance.dummy_leaf_.next_leaf_ while current_leaf: subclusters = current_leaf.subclusters_ for sc in subclusters: assert_greater_equal(threshold, sc.radius) current_leaf = current_leaf.next_leaf_ def test_threshold(): # Test that the leaf subclusters have a threshold lesser than radius X, y = make_blobs(n_samples=80, centers=4) brc = Birch(threshold=0.5, n_clusters=None) brc.fit(X) check_threshold(brc, 0.5) brc = Birch(threshold=5.0, n_clusters=None) brc.fit(X) check_threshold(brc, 5.)
bsd-3-clause
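For context on the API these tests cover, here is a small usage sketch of `Birch` (the blob data and parameter values are arbitrary): `threshold` and `branching_factor` shape the CF-tree, `n_clusters` triggers the global clustering step, and `partial_fit(None)` re-runs only that global step, as exercised in `test_partial_fit` above.

```python
import numpy as np
from sklearn.cluster import Birch
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)

# threshold/branching_factor control the CF-tree; n_clusters=3 adds the global step.
brc = Birch(threshold=0.5, branching_factor=50, n_clusters=3).fit(X)
print("subclusters:", len(brc.subcluster_centers_))
print("labels:", np.unique(brc.labels_))

# Streaming use: build the tree with partial_fit, then pass None to run only
# the global clustering step on the accumulated subclusters.
brc_stream = Birch(n_clusters=None)
brc_stream.partial_fit(X[:100])
brc_stream.partial_fit(X[100:])
brc_stream.set_params(n_clusters=3)
brc_stream.partial_fit(None)
print("streamed subcluster labels:", np.unique(brc_stream.subcluster_labels_))
```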
ClimbsRocks/scikit-learn
sklearn/manifold/locally_linear.py
36
25852
"""Locally Linear Embedding""" # Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr> # Jake Vanderplas -- <vanderplas@astro.washington.edu> # License: BSD 3 clause (C) INRIA 2011 import numpy as np from scipy.linalg import eigh, svd, qr, solve from scipy.sparse import eye, csr_matrix from ..base import BaseEstimator, TransformerMixin from ..utils import check_random_state, check_array from ..utils.arpack import eigsh from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..neighbors import NearestNeighbors def barycenter_weights(X, Z, reg=1e-3): """Compute barycenter weights of X from Y along the first axis We estimate the weights to assign to each point in Y[i] to recover the point X[i]. The barycenter weights sum to 1. Parameters ---------- X : array-like, shape (n_samples, n_dim) Z : array-like, shape (n_samples, n_neighbors, n_dim) reg: float, optional amount of regularization to add for the problem to be well-posed in the case of n_neighbors > n_dim Returns ------- B : array-like, shape (n_samples, n_neighbors) Notes ----- See developers note for more information. """ X = check_array(X, dtype=FLOAT_DTYPES) Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True) n_samples, n_neighbors = X.shape[0], Z.shape[1] B = np.empty((n_samples, n_neighbors), dtype=X.dtype) v = np.ones(n_neighbors, dtype=X.dtype) # this might raise a LinalgError if G is singular and has trace # zero for i, A in enumerate(Z.transpose(0, 2, 1)): C = A.T - X[i] # broadcasting G = np.dot(C, C.T) trace = np.trace(G) if trace > 0: R = reg * trace else: R = reg G.flat[::Z.shape[1] + 1] += R w = solve(G, v, sym_pos=True) B[i, :] = w / np.sum(w) return B def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1): """Computes the barycenter weighted graph of k-Neighbors for points in X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse array, precomputed tree, or NearestNeighbors object. n_neighbors : int Number of neighbors for each sample. reg : float, optional Amount of regularization when solving the least-squares problem. Only relevant if mode='barycenter'. If None, use the default. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. See also -------- sklearn.neighbors.kneighbors_graph sklearn.neighbors.radius_neighbors_graph """ knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X) X = knn._fit_X n_samples = X.shape[0] ind = knn.kneighbors(X, return_distance=False)[:, 1:] data = barycenter_weights(X, X[ind], reg=reg) indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors) return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples)) def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100, random_state=None): """ Find the null space of a matrix M. Parameters ---------- M : {array, matrix, sparse matrix, LinearOperator} Input covariance matrix: should be symmetric positive semi-definite k : integer Number of eigenvalues/vectors to return k_skip : integer, optional Number of low eigenvalues to skip. 
eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. tol : float, optional Tolerance for 'arpack' method. Not used if eigen_solver=='dense'. max_iter : maximum number of iterations for 'arpack' method not used if eigen_solver=='dense' random_state: numpy.RandomState or int, optional The generator or seed used to determine the starting vector for arpack iterations. Defaults to numpy.random. """ if eigen_solver == 'auto': if M.shape[0] > 200 and k + k_skip < 10: eigen_solver = 'arpack' else: eigen_solver = 'dense' if eigen_solver == 'arpack': random_state = check_random_state(random_state) # initialize with [-1,1] as in ARPACK v0 = random_state.uniform(-1, 1, M.shape[0]) try: eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0) except RuntimeError as msg: raise ValueError("Error in determining null-space with ARPACK. " "Error message: '%s'. " "Note that method='arpack' can fail when the " "weight matrix is singular or otherwise " "ill-behaved. method='dense' is recommended. " "See online documentation for more information." % msg) return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:]) elif eigen_solver == 'dense': if hasattr(M, 'toarray'): M = M.toarray() eigen_values, eigen_vectors = eigh( M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True) index = np.argsort(np.abs(eigen_values)) return eigen_vectors[:, index], np.sum(eigen_values) else: raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver) def locally_linear_embedding( X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6, max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12, random_state=None, n_jobs=1): """Perform a Locally Linear Embedding analysis on the data. Read more in the :ref:`User Guide <locally_linear_embedding>`. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse array, precomputed tree, or NearestNeighbors object. n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold. reg : float regularization constant, multiplies the trace of the local covariance matrix of the distances. eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. tol : float, optional Tolerance for 'arpack' method Not used if eigen_solver=='dense'. max_iter : integer maximum number of iterations for the arpack solver. 
method : {'standard', 'hessian', 'modified', 'ltsa'} standard : use the standard locally linear embedding algorithm. see reference [1]_ hessian : use the Hessian eigenmap method. This method requires n_neighbors > n_components * (1 + (n_components + 1) / 2. see reference [2]_ modified : use the modified locally linear embedding algorithm. see reference [3]_ ltsa : use local tangent space alignment algorithm see reference [4]_ hessian_tol : float, optional Tolerance for Hessian eigenmapping method. Only used if method == 'hessian' modified_tol : float, optional Tolerance for modified LLE method. Only used if method == 'modified' random_state: numpy.RandomState or int, optional The generator or seed used to determine the starting vector for arpack iterations. Defaults to numpy.random. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- Y : array-like, shape [n_samples, n_components] Embedding vectors. squared_error : float Reconstruction error for the embedding vectors. Equivalent to ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights. References ---------- .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction by locally linear embedding. Science 290:2323 (2000).` .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data. Proc Natl Acad Sci U S A. 100:5591 (2003).` .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear Embedding Using Multiple Weights.` http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382 .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear dimensionality reduction via tangent space alignment. Journal of Shanghai Univ. 
8:406 (2004)` """ if eigen_solver not in ('auto', 'arpack', 'dense'): raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver) if method not in ('standard', 'hessian', 'modified', 'ltsa'): raise ValueError("unrecognized method '%s'" % method) nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs) nbrs.fit(X) X = nbrs._fit_X N, d_in = X.shape if n_components > d_in: raise ValueError("output dimension must be less than or equal " "to input dimension") if n_neighbors >= N: raise ValueError("n_neighbors must be less than number of points") if n_neighbors <= 0: raise ValueError("n_neighbors must be positive") M_sparse = (eigen_solver != 'dense') if method == 'standard': W = barycenter_kneighbors_graph( nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs) # we'll compute M = (I-W)'(I-W) # depending on the solver, we'll do this differently if M_sparse: M = eye(*W.shape, format=W.format) - W M = (M.T * M).tocsr() else: M = (W.T * W - W.T - W).toarray() M.flat[::M.shape[0] + 1] += 1 # W = W - I = W - I elif method == 'hessian': dp = n_components * (n_components + 1) // 2 if n_neighbors <= n_components + dp: raise ValueError("for method='hessian', n_neighbors must be " "greater than " "[n_components * (n_components + 3) / 2]") neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64) Yi[:, 0] = 1 M = np.zeros((N, N), dtype=np.float64) use_svd = (n_neighbors > d_in) for i in range(N): Gi = X[neighbors[i]] Gi -= Gi.mean(0) # build Hessian estimator if use_svd: U = svd(Gi, full_matrices=0)[0] else: Ci = np.dot(Gi, Gi.T) U = eigh(Ci)[1][:, ::-1] Yi[:, 1:1 + n_components] = U[:, :n_components] j = 1 + n_components for k in range(n_components): Yi[:, j:j + n_components - k] = (U[:, k:k + 1] * U[:, k:n_components]) j += n_components - k Q, R = qr(Yi) w = Q[:, n_components + 1:] S = w.sum(0) S[np.where(abs(S) < hessian_tol)] = 1 w /= S nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] += np.dot(w, w.T) if M_sparse: M = csr_matrix(M) elif method == 'modified': if n_neighbors < n_components: raise ValueError("modified LLE requires " "n_neighbors >= n_components") neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] # find the eigenvectors and eigenvalues of each local covariance # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix, # where the columns are eigenvectors V = np.zeros((N, n_neighbors, n_neighbors)) nev = min(d_in, n_neighbors) evals = np.zeros([N, nev]) # choose the most efficient way to find the eigenvectors use_svd = (n_neighbors > d_in) if use_svd: for i in range(N): X_nbrs = X[neighbors[i]] - X[i] V[i], evals[i], _ = svd(X_nbrs, full_matrices=True) evals **= 2 else: for i in range(N): X_nbrs = X[neighbors[i]] - X[i] C_nbrs = np.dot(X_nbrs, X_nbrs.T) evi, vi = eigh(C_nbrs) evals[i] = evi[::-1] V[i] = vi[:, ::-1] # find regularized weights: this is like normal LLE. # because we've already computed the SVD of each covariance matrix, # it's faster to use this rather than np.linalg.solve reg = 1E-3 * evals.sum(1) tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors)) tmp[:, :nev] /= evals + reg[:, None] tmp[:, nev:] /= reg[:, None] w_reg = np.zeros((N, n_neighbors)) for i in range(N): w_reg[i] = np.dot(V[i], tmp[i]) w_reg /= w_reg.sum(1)[:, None] # calculate eta: the median of the ratio of small to large eigenvalues # across the points. 
This is used to determine s_i, below rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1) eta = np.median(rho) # find s_i, the size of the "almost null space" for each point: # this is the size of the largest set of eigenvalues # such that Sum[v; v in set]/Sum[v; v not in set] < eta s_range = np.zeros(N, dtype=int) evals_cumsum = np.cumsum(evals, 1) eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1 for i in range(N): s_range[i] = np.searchsorted(eta_range[i, ::-1], eta) s_range += n_neighbors - nev # number of zero eigenvalues # Now calculate M. # This is the [N x N] matrix whose null space is the desired embedding M = np.zeros((N, N), dtype=np.float64) for i in range(N): s_i = s_range[i] # select bottom s_i eigenvectors and calculate alpha Vi = V[i, :, n_neighbors - s_i:] alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i) # compute Householder matrix which satisfies # Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s) # using prescription from paper h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors)) norm_h = np.linalg.norm(h) if norm_h < modified_tol: h *= 0 else: h /= norm_h # Householder matrix is # >> Hi = np.identity(s_i) - 2*np.outer(h,h) # Then the weight matrix is # >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None] # We do this much more efficiently: Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None]) # Update M as follows: # >> W_hat = np.zeros( (N,s_i) ) # >> W_hat[neighbors[i],:] = Wi # >> W_hat[i] -= 1 # >> M += np.dot(W_hat,W_hat.T) # We can do this much more efficiently: nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T) Wi_sum1 = Wi.sum(1) M[i, neighbors[i]] -= Wi_sum1 M[neighbors[i], i] -= Wi_sum1 M[i, i] += s_i if M_sparse: M = csr_matrix(M) elif method == 'ltsa': neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1, return_distance=False) neighbors = neighbors[:, 1:] M = np.zeros((N, N)) use_svd = (n_neighbors > d_in) for i in range(N): Xi = X[neighbors[i]] Xi -= Xi.mean(0) # compute n_components largest eigenvalues of Xi * Xi^T if use_svd: v = svd(Xi, full_matrices=True)[0] else: Ci = np.dot(Xi, Xi.T) v = eigh(Ci)[1][:, ::-1] Gi = np.zeros((n_neighbors, n_components + 1)) Gi[:, 1:] = v[:, :n_components] Gi[:, 0] = 1. / np.sqrt(n_neighbors) GiGiT = np.dot(Gi, Gi.T) nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i]) M[nbrs_x, nbrs_y] -= GiGiT M[neighbors[i], neighbors[i]] += 1 return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver, tol=tol, max_iter=max_iter, random_state=random_state) class LocallyLinearEmbedding(BaseEstimator, TransformerMixin): """Locally Linear Embedding Read more in the :ref:`User Guide <locally_linear_embedding>`. Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold reg : float regularization constant, multiplies the trace of the local covariance matrix of the distances. eigen_solver : string, {'auto', 'arpack', 'dense'} auto : algorithm will attempt to choose the best method for input data arpack : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. dense : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 
tol : float, optional Tolerance for 'arpack' method Not used if eigen_solver=='dense'. max_iter : integer maximum number of iterations for the arpack solver. Not used if eigen_solver=='dense'. method : string ('standard', 'hessian', 'modified' or 'ltsa') standard : use the standard locally linear embedding algorithm. see reference [1] hessian : use the Hessian eigenmap method. This method requires ``n_neighbors > n_components * (1 + (n_components + 1) / 2`` see reference [2] modified : use the modified locally linear embedding algorithm. see reference [3] ltsa : use local tangent space alignment algorithm see reference [4] hessian_tol : float, optional Tolerance for Hessian eigenmapping method. Only used if ``method == 'hessian'`` modified_tol : float, optional Tolerance for modified LLE method. Only used if ``method == 'modified'`` neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance random_state: numpy.RandomState or int, optional The generator or seed used to determine the starting vector for arpack iterations. Defaults to numpy.random. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- embedding_vectors_ : array-like, shape [n_components, n_samples] Stores the embedding vectors reconstruction_error_ : float Reconstruction error associated with `embedding_vectors_` nbrs_ : NearestNeighbors object Stores nearest neighbors instance, including BallTree or KDtree if applicable. References ---------- .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction by locally linear embedding. Science 290:2323 (2000).` .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally linear embedding techniques for high-dimensional data. Proc Natl Acad Sci U S A. 100:5591 (2003).` .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear Embedding Using Multiple Weights.` http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382 .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear dimensionality reduction via tangent space alignment. Journal of Shanghai Univ. 8:406 (2004)` """ def __init__(self, n_neighbors=5, n_components=2, reg=1E-3, eigen_solver='auto', tol=1E-6, max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12, neighbors_algorithm='auto', random_state=None, n_jobs=1): self.n_neighbors = n_neighbors self.n_components = n_components self.reg = reg self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.method = method self.hessian_tol = hessian_tol self.modified_tol = modified_tol self.random_state = random_state self.neighbors_algorithm = neighbors_algorithm self.n_jobs = n_jobs def _fit_transform(self, X): self.nbrs_ = NearestNeighbors(self.n_neighbors, algorithm=self.neighbors_algorithm, n_jobs=self.n_jobs) random_state = check_random_state(self.random_state) X = check_array(X) self.nbrs_.fit(X) self.embedding_, self.reconstruction_error_ = \ locally_linear_embedding( self.nbrs_, self.n_neighbors, self.n_components, eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, method=self.method, hessian_tol=self.hessian_tol, modified_tol=self.modified_tol, random_state=random_state, reg=self.reg, n_jobs=self.n_jobs) def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : array-like of shape [n_samples, n_features] training set. 
Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Compute the embedding vectors for data X and transform X. Parameters ---------- X : array-like of shape [n_samples, n_features] training set. Returns ------- X_new: array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """ Transform new points into embedding space. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- X_new : array, shape = [n_samples, n_components] Notes ----- Because of scaling performed by this method, it is discouraged to use it together with methods that are not scale-invariant (like SVMs) """ check_is_fitted(self, "nbrs_") X = check_array(X) ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors, return_distance=False) weights = barycenter_weights(X, self.nbrs_._fit_X[ind], reg=self.reg) X_new = np.empty((X.shape[0], self.n_components)) for i in range(X.shape[0]): X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i]) return X_new
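A minimal usage sketch of the LocallyLinearEmbedding estimator defined above, assuming a standard scikit-learn installation in which it is importable from sklearn.manifold and in which sklearn.datasets.make_swiss_roll is available; the parameter values are illustrative only:

import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import LocallyLinearEmbedding

# 1000 points sampled from a 3-D swiss roll
X, color = make_swiss_roll(n_samples=1000, random_state=0)

# Unroll the manifold into 2 dimensions with standard LLE
lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                             method='standard', random_state=0)
X_embedded = lle.fit_transform(X)

print(X_embedded.shape)            # (1000, 2)
print(lle.reconstruction_error_)   # squared Frobenius reconstruction error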
bsd-3-clause
DSLituiev/scikit-learn
sklearn/metrics/tests/test_ranking.py
31
41905
from __future__ import division, print_function import numpy as np from itertools import product import warnings from scipy.sparse import csr_matrix from sklearn import datasets from sklearn import svm from sklearn import ensemble from sklearn.datasets import make_multilabel_classification from sklearn.random_projection import sparse_random_matrix from sklearn.utils.validation import check_array, check_consistent_length from sklearn.utils.validation import check_random_state from sklearn.utils.testing import assert_raises, clean_warning_registry from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.metrics import auc from sklearn.metrics import average_precision_score from sklearn.metrics import coverage_error from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import precision_recall_curve from sklearn.metrics import label_ranking_loss from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from sklearn.exceptions import UndefinedMetricWarning ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred ############################################################################### # Tests def _auc(y_true, y_score): """Alternative implementation to check for correctness of `roc_auc_score`.""" pos_label = np.unique(y_true)[1] # Count the number of times positive samples are correctly ranked above # negative samples. pos = y_score[y_true == pos_label] neg = y_score[y_true != pos_label] diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) n_correct = np.sum(diff_matrix > 0) return n_correct / float(len(pos) * len(neg)) def _average_precision(y_true, y_score): """Alternative implementation to check for correctness of `average_precision_score`.""" pos_label = np.unique(y_true)[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_score = y_score[order] y_true = y_true[order] score = 0 for i in range(len(y_score)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. 
prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec return score / n_pos def test_roc_curve(): # Test Area under Receiver Operating Characteristic (ROC) curve y_true, _, probas_pred = make_prediction(binary=True) expected_auc = _auc(y_true, probas_pred) for drop in [True, False]: fpr, tpr, thresholds = roc_curve(y_true, probas_pred, drop_intermediate=drop) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, expected_auc, decimal=2) assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred)) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_end_points(): # Make sure that roc_curve returns a curve start at 0 and ending and # 1 even in corner cases rng = np.random.RandomState(0) y_true = np.array([0] * 50 + [1] * 50) y_pred = rng.randint(3, size=100) fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True) assert_equal(fpr[0], 0) assert_equal(fpr[-1], 1) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thr.shape) def test_roc_returns_consistency(): # Test whether the returned threshold matches up with tpr # make small toy dataset y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred) # use the given thresholds to determine the tpr tpr_correct = [] for t in thresholds: tp = np.sum((probas_pred >= t) & y_true) p = np.sum(y_true) tpr_correct.append(1.0 * tp / p) # compare tpr and tpr_correct to see if the thresholds' order was correct assert_array_almost_equal(tpr, tpr_correct, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_nonrepeating_thresholds(): # Test to ensure that we don't return spurious repeating thresholds. # Duplicated thresholds can arise due to machine precision issues. dataset = datasets.load_digits() X = dataset['data'] y = dataset['target'] # This random forest classifier can only return probabilities # significant to two decimal places clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0) # How well can the classifier predict whether a digit is less than 5? 
# This task contributes floating point roundoff errors to the probabilities train, test = slice(None, None, 2), slice(1, None, 2) probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test]) y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here y_true = [yy < 5 for yy in y[test]] # Check for repeating values in the thresholds fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False) assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size) def test_roc_curve_multi(): # roc_curve not applicable for multi-class problems y_true, _, probas_pred = make_prediction(binary=False) assert_raises(ValueError, roc_curve, y_true, probas_pred) def test_roc_curve_confidence(): # roc_curve for confidence scores y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.90, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_hard(): # roc_curve for hard decisions y_true, pred, probas_pred = make_prediction(binary=True) # always predict one trivial_pred = np.ones(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # always predict zero trivial_pred = np.zeros(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # hard decisions fpr, tpr, thresholds = roc_curve(y_true, pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.78, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_one_label(): y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # assert there are warnings w = UndefinedMetricWarning fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred) # all true labels, all fpr should be nan assert_array_equal(fpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # assert there are warnings fpr, tpr, thresholds = assert_warns(w, roc_curve, [1 - x for x in y_true], y_pred) # all negative labels, all tpr should be nan assert_array_equal(tpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_toydata(): # Binary classification y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) 
y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, .5) y_true = [0, 0] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no positive sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [0., 0.5, 1.]) assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) y_true = [1, 1] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no negative sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [np.nan, np.nan]) assert_array_almost_equal(fpr, [0.5, 1.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.) y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5) def test_roc_curve_drop_intermediate(): # Test that drop_intermediate drops the correct thresholds y_true = [0, 0, 0, 0, 1, 1] y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1., 0.7, 0.]) # Test dropping thresholds with repeating scores y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1.0, 0.9, 0.7, 0.6, 0.]) def test_auc(): # Test Area Under Curve (AUC) computation x = [0, 1] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0, 0] y = [0, 1, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [0, 1] y = [1, 1] assert_array_almost_equal(auc(x, y), 1) x = [0, 0.5, 1] y = [0, 0.5, 1] assert_array_almost_equal(auc(x, y), 0.5) def test_auc_duplicate_values(): # Test Area Under Curve (AUC) computation with duplicate values # auc() was 
previously sorting the x and y arrays according to the indices # from numpy.argsort(x), which was reordering the tied 0's in this example # and resulting in an incorrect area computation. This test detects the # error. x = [-2.0, 0.0, 0.0, 0.0, 1.0] y1 = [2.0, 0.0, 0.5, 1.0, 1.0] y2 = [2.0, 1.0, 0.0, 0.5, 1.0] y3 = [2.0, 1.0, 0.5, 0.0, 1.0] for y in (y1, y2, y3): assert_array_almost_equal(auc(x, y, reorder=True), 3.0) def test_auc_errors(): # Incompatible shapes assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2]) # Too few x values assert_raises(ValueError, auc, [0.0], [0.1]) # x is not in order assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0]) def test_auc_score_non_binary_class(): # Test that roc_auc_score function returns an error when trying # to compute AUC for non-binary class values. rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) clean_warning_registry() with warnings.catch_warnings(record=True): rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) def test_precision_recall_curve(): y_true, _, probas_pred = make_prediction(binary=True) _test_precision_recall_curve(y_true, probas_pred) # Use {-1, 1} for labels; make sure original labels aren't modified y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() _test_precision_recall_curve(y_true, probas_pred) assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] p, r, t = precision_recall_curve(labels, predict_probas) assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.])) assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.])) assert_array_almost_equal(t, np.array([1, 2, 3, 4])) assert_equal(p.size, r.size) assert_equal(p.size, t.size + 1) def test_precision_recall_curve_pos_label(): y_true, _, probas_pred = make_prediction(binary=False) pos_label = 2 p, r, thresholds = precision_recall_curve(y_true, probas_pred[:, pos_label], pos_label=pos_label) p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label, probas_pred[:, pos_label]) assert_array_almost_equal(p, p2) assert_array_almost_equal(r, r2) assert_array_almost_equal(thresholds, thresholds2) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def _test_precision_recall_curve(y_true, probas_pred): # Test Precision-Recall and aread under PR curve p, r, 
thresholds = precision_recall_curve(y_true, probas_pred) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.85, 2) assert_array_almost_equal(precision_recall_auc, average_precision_score(y_true, probas_pred)) assert_almost_equal(_average_precision(y_true, probas_pred), precision_recall_auc, 1) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) # Smoke test in the case of proba having only one value p, r, thresholds = precision_recall_curve(y_true, np.zeros_like(probas_pred)) precision_recall_auc = auc(r, p) assert_array_almost_equal(precision_recall_auc, 0.75, 3) assert_equal(p.size, r.size) assert_equal(p.size, thresholds.size + 1) def test_precision_recall_curve_errors(): # Contains non-binary labels assert_raises(ValueError, precision_recall_curve, [0, 1, 2], [[0.0], [1.0], [1.0]]) def test_precision_recall_curve_toydata(): with np.errstate(all="raise"): # Binary classification y_true = [0, 1] y_score = [0, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [0, 1] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 0., 1.]) assert_array_almost_equal(r, [1., 0., 0.]) assert_almost_equal(auc_prc, 0.25) y_true = [1, 0] y_score = [1, 1] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1., 0]) assert_almost_equal(auc_prc, .75) y_true = [1, 0] y_score = [1, 0] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [1, 1]) assert_array_almost_equal(r, [1, 0]) assert_almost_equal(auc_prc, 1.) y_true = [1, 0] y_score = [0.5, 0.5] p, r, _ = precision_recall_curve(y_true, y_score) auc_prc = average_precision_score(y_true, y_score) assert_array_almost_equal(p, [0.5, 1]) assert_array_almost_equal(r, [1, 0.]) assert_almost_equal(auc_prc, .75) y_true = [0, 0] y_score = [0.25, 0.75] assert_raises(Exception, precision_recall_curve, y_true, y_score) assert_raises(Exception, average_precision_score, y_true, y_score) y_true = [1, 1] y_score = [0.25, 0.75] p, r, _ = precision_recall_curve(y_true, y_score) assert_almost_equal(average_precision_score(y_true, y_score), 1.) assert_array_almost_equal(p, [1., 1., 1.]) assert_array_almost_equal(r, [1, 0.5, 0.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 1.) 
y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(Exception, average_precision_score, y_true, y_score, average="macro") assert_raises(Exception, average_precision_score, y_true, y_score, average="weighted") assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.625) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.625) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.25) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.25) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(average_precision_score(y_true, y_score, average="macro"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="weighted"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="samples"), 0.75) assert_almost_equal(average_precision_score(y_true, y_score, average="micro"), 0.75) def test_score_scale_invariance(): # Test that average_precision_score and roc_auc_score are invariant by # the scaling or shifting of probabilities y_true, _, probas_pred = make_prediction(binary=True) roc_auc = roc_auc_score(y_true, probas_pred) roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred) roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10) assert_equal(roc_auc, roc_auc_scaled) assert_equal(roc_auc, roc_auc_shifted) pr_auc = average_precision_score(y_true, probas_pred) pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred) pr_auc_shifted = average_precision_score(y_true, probas_pred - 10) assert_equal(pr_auc, pr_auc_scaled) assert_equal(pr_auc, pr_auc_shifted) def check_lrap_toy(lrap_score): # Check on several small example that it works assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1) assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]), (2 / 3 + 1 / 1) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[1, 0, 1]], 
[[0.5, 0.75, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1) # Tie handling assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3) assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]), 3 / 4) def check_zero_or_all_relevant_labels(lrap_score): random_state = check_random_state(0) for n_labels in range(2, 5): y_score = random_state.uniform(size=(1, n_labels)) y_score_ties = np.zeros_like(y_score) # No relevant labels y_true = np.zeros((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Only relevant labels y_true = np.ones((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Degenerate case: only one label assert_almost_equal(lrap_score([[1], [0], [1], [0]], [[0.5], [0.5], [0.5], [0.5]]), 1.) def check_lrap_error_raised(lrap_score): # Raise value error if not appropriate format assert_raises(ValueError, lrap_score, [0, 1, 0], [0.25, 0.3, 0.2]) assert_raises(ValueError, lrap_score, [0, 1, 2], [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]) assert_raises(ValueError, lrap_score, [(0), (1), (2)], [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]]) # Check that y_true.shape != y_score.shape raise the proper exception assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]]) assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]]) assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]]) assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]]) def check_lrap_only_ties(lrap_score): # Check tie handling in score # Basic check with only ties and increasing label space for n_labels in range(2, 10): y_score = np.ones((1, n_labels)) # Check for growing number of consecutive relevant for n_relevant in range(1, n_labels): # Check for a bunch of positions for pos in range(n_labels - n_relevant): y_true = np.zeros((1, n_labels)) y_true[0, pos:pos + n_relevant] = 1 assert_almost_equal(lrap_score(y_true, y_score), n_relevant / n_labels) def check_lrap_without_tie_and_increasing_score(lrap_score): # Check that Label ranking average precision works for various # Basic check with increasing label space size and decreasing score for n_labels in range(2, 10): y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1) # First and last y_true = np.zeros((1, n_labels)) y_true[0, 0] = 1 y_true[0, -1] = 1 assert_almost_equal(lrap_score(y_true, y_score), (2 / n_labels + 1) / 2) # Check for growing number of consecutive relevant label for n_relevant in range(1, n_labels): # 
Check for a bunch of position for pos in range(n_labels - n_relevant): y_true = np.zeros((1, n_labels)) y_true[0, pos:pos + n_relevant] = 1 assert_almost_equal(lrap_score(y_true, y_score), sum((r + 1) / ((pos + r + 1) * n_relevant) for r in range(n_relevant))) def _my_lrap(y_true, y_score): """Simple implementation of label ranking average precision""" check_consistent_length(y_true, y_score) y_true = check_array(y_true) y_score = check_array(y_score) n_samples, n_labels = y_true.shape score = np.empty((n_samples, )) for i in range(n_samples): # The best rank correspond to 1. Rank higher than 1 are worse. # The best inverse ranking correspond to n_labels. unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True) n_ranks = unique_rank.size rank = n_ranks - inv_rank # Rank need to be corrected to take into account ties # ex: rank 1 ex aequo means that both label are rank 2. corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum() rank = corr_rank[rank] relevant = y_true[i].nonzero()[0] if relevant.size == 0 or relevant.size == n_labels: score[i] = 1 continue score[i] = 0. for label in relevant: # Let's count the number of relevant label with better rank # (smaller rank). n_ranked_above = sum(rank[r] <= rank[label] for r in relevant) # Weight by the rank of the actual label score[i] += n_ranked_above / rank[label] score[i] /= relevant.size return score.mean() def check_alternative_lrap_implementation(lrap_score, n_classes=5, n_samples=20, random_state=0): _, y_true = make_multilabel_classification(n_features=1, allow_unlabeled=False, random_state=random_state, n_classes=n_classes, n_samples=n_samples) # Score with ties y_score = sparse_random_matrix(n_components=y_true.shape[0], n_features=y_true.shape[1], random_state=random_state) if hasattr(y_score, "toarray"): y_score = y_score.toarray() score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) # Uniform score random_state = check_random_state(random_state) y_score = random_state.uniform(size=(n_samples, n_classes)) score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) def test_label_ranking_avp(): for fn in [label_ranking_average_precision_score, _my_lrap]: yield check_lrap_toy, fn yield check_lrap_without_tie_and_increasing_score, fn yield check_lrap_only_ties, fn yield check_zero_or_all_relevant_labels, fn yield check_lrap_error_raised, label_ranking_average_precision_score for n_samples, n_classes, random_state in product((1, 2, 8, 20), (2, 5, 10), range(1)): yield (check_alternative_lrap_implementation, label_ranking_average_precision_score, n_classes, n_samples, random_state) def test_coverage_error(): # Toy case assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3) 
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3) # Non trival case assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]], [[0.1, 10., -3], [0, 1, 3]]), (1 + 3) / 2.) assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]), (1 + 3 + 3) / 3.) assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]), (1 + 3 + 3) / 3.) def test_coverage_tie_handling(): assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0) assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3) def test_label_ranking_loss(): assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1) assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 2 / 2) assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 1 / 2) assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 2 / 2) # Undefined metrics - the ranking doesn't matter assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0) 
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0) # Non trival case assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]], [[0.1, 10., -3], [0, 1, 3]]), (0 + 2 / 2) / 2.) assert_almost_equal(label_ranking_loss( [[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]), (0 + 2 / 2 + 1 / 2) / 3.) assert_almost_equal(label_ranking_loss( [[0, 1, 0], [1, 1, 0], [0, 1, 1]], [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]), (0 + 2 / 2 + 1 / 2) / 3.) # Sparse csr matrices assert_almost_equal(label_ranking_loss( csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])), [[0.1, 10, -3], [3, 1, 3]]), (0 + 2 / 2) / 2.) def test_ranking_appropriate_input_shape(): # Check that y_true.shape != y_score.shape raise the proper exception assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0], [1]], [[0, 1], [0, 1]]) assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]]) def test_ranking_loss_ties_handling(): # Tie handling assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 1 / 2) assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0) assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
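The behaviour exercised by the tests above can be reproduced on a small example; this is a minimal sketch using only the public sklearn.metrics API, with toy labels and scores chosen here purely for illustration:

import numpy as np
from sklearn.metrics import auc, average_precision_score, roc_auc_score, roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, thresholds = roc_curve(y_true, y_score)
print(auc(fpr, tpr))                    # 0.75
print(roc_auc_score(y_true, y_score))   # 0.75, same area without building the curve explicitly
# The exact average precision depends on the interpolation used by the installed version
print(average_precision_score(y_true, y_score))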
bsd-3-clause
ahoyosid/scikit-learn
sklearn/metrics/metrics.py
232
1262
import warnings warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in " "0.18. Please import from sklearn.metrics", DeprecationWarning) from .ranking import auc from .ranking import average_precision_score from .ranking import label_ranking_average_precision_score from .ranking import precision_recall_curve from .ranking import roc_auc_score from .ranking import roc_curve from .classification import accuracy_score from .classification import classification_report from .classification import confusion_matrix from .classification import f1_score from .classification import fbeta_score from .classification import hamming_loss from .classification import hinge_loss from .classification import jaccard_similarity_score from .classification import log_loss from .classification import matthews_corrcoef from .classification import precision_recall_fscore_support from .classification import precision_score from .classification import recall_score from .classification import zero_one_loss from .regression import explained_variance_score from .regression import mean_absolute_error from .regression import mean_squared_error from .regression import median_absolute_error from .regression import r2_score
bsd-3-clause
idlead/scikit-learn
examples/linear_model/plot_bayesian_ridge.py
17
2733
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create Gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights lw = 2 plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, color='lightgreen', linewidth=lw, label="Bayesian Ridge estimate") plt.plot(w, color='gold', linewidth=lw, label="Ground truth") plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc="best", prop=dict(size=12)) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, color='gold', log=True) plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), color='navy', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc="upper left") plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_, color='navy', linewidth=lw) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
idlead/scikit-learn
sklearn/utils/metaestimators.py
281
2353
"""Utilities for meta-estimators""" # Author: Joel Nothman # Andreas Mueller # Licence: BSD from operator import attrgetter from functools import update_wrapper __all__ = ['if_delegate_has_method'] class _IffHasAttrDescriptor(object): """Implements a conditional property using the descriptor protocol. Using this class to create a decorator will raise an ``AttributeError`` if the ``attribute_name`` is not present on the base object. This allows ducktyping of the decorated method based on ``attribute_name``. See https://docs.python.org/3/howto/descriptor.html for an explanation of descriptors. """ def __init__(self, fn, attribute_name): self.fn = fn self.get_attribute = attrgetter(attribute_name) # update the docstring of the descriptor update_wrapper(self, fn) def __get__(self, obj, type=None): # raise an AttributeError if the attribute is not present on the object if obj is not None: # delegate only on instances, not the classes. # this is to allow access to the docstrings. self.get_attribute(obj) # lambda, but not partial, allows help() to work with update_wrapper out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs) # update the docstring of the returned function update_wrapper(out, self.fn) return out def if_delegate_has_method(delegate): """Create a decorator for methods that are delegated to a sub-estimator This enables ducktyping by hasattr returning True according to the sub-estimator. >>> from sklearn.utils.metaestimators import if_delegate_has_method >>> >>> >>> class MetaEst(object): ... def __init__(self, sub_est): ... self.sub_est = sub_est ... ... @if_delegate_has_method(delegate='sub_est') ... def predict(self, X): ... return self.sub_est.predict(X) ... >>> class HasPredict(object): ... def predict(self, X): ... return X.sum(axis=1) ... >>> class HasNoPredict(object): ... pass ... >>> hasattr(MetaEst(HasPredict()), 'predict') True >>> hasattr(MetaEst(HasNoPredict()), 'predict') False """ return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
bsd-3-clause
txomon/SpockBot
spock/mcmap/mapdata.py
1
41665
from spock.utils import BoundingBox

# Materials
MCM_MAT_ROCK = 0x00
MCM_MAT_DIRT = 0x01
MCM_MAT_WOOD = 0x02
MCM_MAT_WEB = 0x03
MCM_MAT_WOOL = 0x04
MCM_MAT_VINE = 0x05
MCM_MAT_LEAVES = 0x06

# Gate
MCM_GATE_SOUTH = 0x00
MCM_GATE_WEST = 0x01
MCM_GATE_NORTH = 0x02
MCM_GATE_EAST = 0x03
MCM_GATE_CLOSE = 0x00
MCM_GATE_OPEN = 0x01
MCM_GATE_UNPOWERED = 0x00
MCM_GATE_POWERED = 0x01

# Door
MCM_DOOR_WEST = 0x00
MCM_DOOR_NORTH = 0x01
MCM_DOOR_EAST = 0x02
MCM_DOOR_SOUTH = 0x03
MCM_DOOR_CLOSE = 0x00
MCM_DOOR_OPEN = 0x01
MCM_DOOR_LOWER = 0x00
MCM_DOOR_UPPER = 0x01
MCM_DOOR_HINGE_LEFT = 0x00
MCM_DOOR_HINGE_RIGHT = 0x01

# Trapdoor
MCM_TRAPDOOR_WEST = 0x00
MCM_TRAPDOOR_NORTH = 0x01
MCM_TRAPDOOR_EAST = 0x02
MCM_TRAPDOOR_SOUTH = 0x03
MCM_TRAPDOOR_CLOSE = 0x00
MCM_TRAPDOOR_OPEN = 0x01
MCM_TRAPDOOR_LOWER = 0x00
MCM_TRAPDOOR_UPPER = 0x01

# Slab
MCM_SLAB_LOWER = 0x00
MCM_SLAB_UPPER = 0x01

blocks = {}

def map_block(block_id):
    def inner(cl):
        blocks[block_id] = cl
        cl.block_id = block_id
        return cl
    return inner

def get_block(block_id, meta=0, init=True):
    if init:
        return blocks[block_id](meta) if block_id < len(blocks) else None
    else:
        return blocks[block_id] if block_id < len(blocks) else None

class MapBlock(object):
    display_name = 'Map Block'
    name = 'map_block'
    hardness = 0
    stack_size = 64
    diggable = True
    material = None
    harvest_tools = None

    def __init__(self, meta):
        self.bounding_box = BoundingBox(1, 1)

    def change_meta(self, meta):
        pass

class FenceBlock(MapBlock):
    def __init__(self, meta):
        self.bounding_box = BoundingBox(1, 1.5)

class GateBlock(MapBlock):
    def __init__(self, meta):
        self.direction = meta & 0x03
        self.open = (meta >> 2) & 0x01 == MCM_GATE_OPEN
        self.powered = meta >> 3 == MCM_GATE_POWERED
        if self.open:
            self.bounding_box = None
        else:
            self.bounding_box = BoundingBox(1, 1.5)

class DoorBlock(MapBlock):
    def __init__(self, meta):
        self.section = (meta >> 3) & 0x1
        if self.section == MCM_DOOR_LOWER:
            self.open = (meta >> 2) & 0x01 == MCM_DOOR_OPEN
            self.direction = meta & 0x03
            if not self.open:
                self.bounding_box = BoundingBox(1, 2)
            else:
                self.bounding_box = None
        elif self.section == MCM_DOOR_UPPER:
            self.hinge = meta & 0x01
            self.bounding_box = None

class SlabBlock(MapBlock):
    def __init__(self, meta):
        self.orientation = (meta >> 3) & 0x1
        self.bounding_box = BoundingBox(1, 1)

class StairBlock(MapBlock):
    def __init__(self, meta):
        self.bounding_box = BoundingBox(1, 1)

class TrapdoorBlock(MapBlock):
    def __init__(self, meta):
        self.direction = meta & 0x03
        self.open = (meta >> 2) & 0x01 == MCM_TRAPDOOR_OPEN
        self.orientation = (meta >> 3) & 0x1
        if self.open == MCM_TRAPDOOR_OPEN:
            self.bounding_box = None
        elif self.orientation == MCM_TRAPDOOR_UPPER:
            self.bounding_box = BoundingBox(1, 1)
        elif self.orientation == MCM_TRAPDOOR_LOWER:
            self.bounding_box = BoundingBox(1, 0.4)

class NoCollisionBlock(MapBlock):
    def __init__(self, meta):
        self.bounding_box = None

@map_block(0)
class AirBlock(NoCollisionBlock):
    display_name = 'Air'
    name = 'air'
    diggable = False

@map_block(1)
class StoneBlock(MapBlock):
    display_name = 'Stone'
    name = 'stone'
    hardness = 1.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(2)
class GrassBlock(MapBlock):
    display_name = 'Grass Block'
    name = 'grass'
    hardness = 0.6
    material = MCM_MAT_DIRT

@map_block(3)
class DirtBlock(MapBlock):
    display_name = 'Dirt'
    name = 'dirt'
    hardness = 0.5
    material = MCM_MAT_DIRT

@map_block(4)
class CobbleBlock(MapBlock):
    display_name = 'Cobblestone'
    name = 'stonebrick'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(5)
class WoodplankBlock(MapBlock):
    display_name = 'Wooden Planks'
    name = 'wood'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(6)
class SaplingBlock(NoCollisionBlock):
    display_name = 'Sapling'
    name = 'sapling'

@map_block(7)
class BedrockBlock(MapBlock):
    display_name = 'Bedrock'
    name = 'bedrock'
    hardness = None
    diggable = False

@map_block(8)
class WaterBlock(NoCollisionBlock):
    display_name = 'Water'
    name = 'water'
    hardness = 100
    diggable = False

@map_block(9)
class StationarywaterBlock(NoCollisionBlock):
    display_name = 'Stationary Water'
    name = 'waterStationary'
    hardness = 100
    diggable = False

@map_block(10)
class LavaBlock(NoCollisionBlock):
    display_name = 'Lava'
    name = 'lava'
    hardness = 100
    diggable = False

@map_block(11)
class StationarylavaBlock(NoCollisionBlock):
    display_name = 'Stationary Lava'
    name = 'lavaStationary'
    hardness = 100
    diggable = False

@map_block(12)
class SandBlock(MapBlock):
    display_name = 'Sand'
    name = 'sand'
    hardness = 0.5
    material = MCM_MAT_DIRT

@map_block(13)
class GravelBlock(MapBlock):
    display_name = 'Gravel'
    name = 'gravel'
    hardness = 0.6
    material = MCM_MAT_DIRT

@map_block(14)
class GoldoreBlock(MapBlock):
    display_name = 'Gold Ore'
    name = 'oreGold'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(15)
class IronoreBlock(MapBlock):
    display_name = 'Iron Ore'
    name = 'oreIron'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (274, 257, 278)

@map_block(16)
class CoaloreBlock(MapBlock):
    display_name = 'Coal Ore'
    name = 'oreCoal'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(17)
class Woodblock(MapBlock):
    display_name = 'Wood'
    name = 'log'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(18)
class LeavesBlock(MapBlock):
    display_name = 'Leaves'
    name = 'leaves'
    hardness = 0.2
    material = MCM_MAT_LEAVES

@map_block(19)
class SpongeBlock(MapBlock):
    display_name = 'Sponge'
    name = 'sponge'
    hardness = 0.6

@map_block(20)
class GlassBlock(MapBlock):
    display_name = 'Glass'
    name = 'glass'
    hardness = 0.3

@map_block(21)
class LapisoreBlock(MapBlock):
    display_name = 'Lapis Lazuli Ore'
    name = 'oreLapis'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (274, 257, 278)

@map_block(22)
class LapisBlock(MapBlock):
    display_name = 'Lapis Lazuli Block'
    name = 'blockLapis'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (274, 257, 278)

@map_block(23)
class DispenserBlock(MapBlock):
    display_name = 'Dispenser'
    name = 'dispenser'
    hardness = 3.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(24)
class SandstoneBlock(MapBlock):
    display_name = 'Sandstone'
    name = 'sandStone'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(25)
class NoteBlock(MapBlock):
    display_name = 'Note Block'
    name = 'musicBlock'
    hardness = 0.8
    material = MCM_MAT_WOOD

@map_block(26)
class BedBlock(MapBlock):
    display_name = 'Bed'
    name = 'bed'
    hardness = 0.2
    stack_size = 1

@map_block(27)
class PoweredrailBlock(NoCollisionBlock):
    display_name = 'Powered Rail'
    name = 'goldenRail'
    hardness = 0.7
    material = MCM_MAT_ROCK

@map_block(28)
class DetectorrailBlock(NoCollisionBlock):
    display_name = 'Detector Rail'
    name = 'detectorRail'
    hardness = 0.7
    material = MCM_MAT_ROCK

@map_block(29)
class StickypistonBlock(MapBlock):
    display_name = 'Sticky Piston'
    name = 'pistonStickyBase'

@map_block(30)
class CobwebBlock(NoCollisionBlock):
    display_name = 'Cobweb'
    name = 'web'
    hardness = 4
    material = MCM_MAT_WEB
    harvest_tools = (359, 267, 268, 272, 276, 283)

@map_block(31)
class TallgrassBlock(NoCollisionBlock):
    display_name = 'Grass'
    name = 'tallgrass'

@map_block(32)
class DeadbushBlock(NoCollisionBlock):
    display_name = 'Dead Bush'
    name = 'deadbush'

@map_block(33)
class PistonBlock(MapBlock):
    display_name = 'Piston'
    name = 'pistonBase'

@map_block(34)
class PistonextensionBlock(MapBlock):
    display_name = 'Piston Extension'
    name = 'pistonExtension'

@map_block(35)
class WoolBlock(MapBlock):
    display_name = 'Wool'
    name = 'cloth'
    hardness = 0.8
    material = MCM_MAT_WOOL

@map_block(36)
class PistonmovedBlock(MapBlock):
    display_name = 'Block Moved by Piston'
    name = 'blockMovedByPiston'

@map_block(37)
class FlowerBlock(NoCollisionBlock):
    display_name = 'Flower'
    name = 'flower'

@map_block(38)
class RoseBlock(NoCollisionBlock):
    display_name = 'Rose'
    name = 'rose'

@map_block(39)
class BrownshroomBlock(NoCollisionBlock):
    display_name = 'Brown Mushroom'
    name = 'mushroomBrown'

@map_block(40)
class RedshroomBlock(NoCollisionBlock):
    display_name = 'Red Mushroom'
    name = 'mushroomRed'

@map_block(41)
class GoldBlock(MapBlock):
    display_name = 'Block of Gold'
    name = 'blockGold'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(42)
class IronBlock(MapBlock):
    display_name = 'Block of Iron'
    name = 'blockIron'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (274, 257, 278)

@map_block(43)
class DoublestoneslabBlock(MapBlock):
    display_name = 'Double Stone Slab'
    name = 'stoneSlabDouble'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(44)
class StoneslabBlock(SlabBlock):
    display_name = 'Stone Slab'
    name = 'stoneSlab'
    hardness = 2

@map_block(45)
class BricksBlock(MapBlock):
    display_name = 'Bricks'
    name = 'brick'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(46)
class TntBlock(MapBlock):
    display_name = 'TNT'
    name = 'tnt'

@map_block(47)
class BookshelfBlock(MapBlock):
    display_name = 'Bookshelf'
    name = 'bookshelf'
    hardness = 1.5
    material = MCM_MAT_WOOD

@map_block(48)
class MossstoneBlock(MapBlock):
    display_name = 'Moss Stone'
    name = 'stoneMoss'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(49)
class ObsidianBlock(MapBlock):
    display_name = 'Obsidian'
    name = 'obsidian'
    hardness = 50
    material = MCM_MAT_ROCK
    harvest_tools = (278,)

@map_block(50)
class TorchBlock(NoCollisionBlock):
    display_name = 'Torch'
    name = 'torch'

@map_block(51)
class FireBlock(NoCollisionBlock):
    display_name = 'Fire'
    name = 'fire'

@map_block(52)
class MobspawnerBlock(MapBlock):
    display_name = 'Monster Spawner'
    name = 'mobSpawner'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(53)
class WoodstairBlock(StairBlock):
    display_name = 'Wooden Stairs'
    name = 'stairsWood'
    material = MCM_MAT_WOOD

@map_block(54)
class ChestBlock(MapBlock):
    display_name = 'Chest'
    name = 'chest'
    hardness = 2.5
    material = MCM_MAT_WOOD

@map_block(55)
class RedstonedustBlock(NoCollisionBlock):
    display_name = 'Redstone Dust'
    name = 'redstoneDust'

@map_block(56)
class DiamondoreBlock(MapBlock):
    display_name = 'Diamond Ore'
    name = 'oreDiamond'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(57)
class DiamondBlock(MapBlock):
    display_name = 'Block of Diamond'
    name = 'blockDiamond'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(58)
class CraftingBlock(MapBlock):
    display_name = 'Crafting Table'
    name = 'workbench'
    hardness = 2.5
    material = MCM_MAT_WOOD

@map_block(59)
class CropsBlock(NoCollisionBlock):
    display_name = 'Wheat Crops'
    name = 'wheat'

@map_block(60)
class FarmBlock(MapBlock):
    display_name = 'Farmland'
    name = 'farmland'
    hardness = 0.6
    material = MCM_MAT_DIRT

@map_block(61)
class FurnaceBlock(MapBlock):
    display_name = 'Furnace'
    name = 'furnace'
    hardness = 3.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(62)
class BurningfurnaceBlock(MapBlock):
    display_name = 'Burning Furnace'
    name = 'furnaceBurning'
    hardness = 3.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(63)
class StandingsignBlock(NoCollisionBlock):
    display_name = 'Sign Post'
    name = 'signPost'
    hardness = 1
    stack_size = 16
    material = MCM_MAT_WOOD

@map_block(64)
class WooddoorBlock(DoorBlock):
    display_name = 'Wooden Door'
    name = 'doorWood'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

@map_block(65)
class LadderBlock(NoCollisionBlock):
    display_name = 'Ladder'
    name = 'ladder'
    hardness = 0.4

@map_block(66)
class RailBlock(NoCollisionBlock):
    display_name = 'Rail'
    name = 'rail'
    hardness = 0.7
    material = MCM_MAT_ROCK

@map_block(67)
class CobblestairBlock(StairBlock):
    display_name = 'Cobblestone Stairs'
    name = 'stairsStone'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(68)
class WallsignBlock(NoCollisionBlock):
    display_name = 'Wall Sign'
    name = 'signWall'
    hardness = 1
    stack_size = 16
    material = MCM_MAT_WOOD

@map_block(69)
class LeverBlock(NoCollisionBlock):
    display_name = 'Lever'
    name = 'lever'
    hardness = 0.5

@map_block(70)
class StoneplateBlock(NoCollisionBlock):
    display_name = 'Stone Pressure Plate'
    name = 'stonePressurePlate'
    hardness = 0.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(71)
class IrondoorBlock(DoorBlock):
    display_name = 'Iron Door'
    name = 'doorIron'
    hardness = 5
    stack_size = 1
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(72)
class WoodplateBlock(NoCollisionBlock):
    display_name = 'Wooden Pressure Plate'
    name = 'woodPressurePlate'
    hardness = 0.5
    material = MCM_MAT_WOOD

@map_block(73)
class RedstoneoreBlock(MapBlock):
    display_name = 'Redstone Ore'
    name = 'oreRedstone'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(74)
class GlowingredstoneoreBlock(MapBlock):
    display_name = 'Glowing Redstone Ore'
    name = 'oreRedstoneGlowing'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(75)
class RedstonetorchoffBlock(NoCollisionBlock):
    display_name = 'Redstone Torch (Inactive)'
    name = 'notGateInactive'

@map_block(76)
class RedstonetorchonBlock(NoCollisionBlock):
    display_name = 'Redstone Torch (Active)'
    name = 'notGateActive'

@map_block(77)
class StonebuttonBlock(NoCollisionBlock):
    display_name = 'Stone Button'
    name = 'buttonStone'
    hardness = 0.5

@map_block(78)
class GroundsnowBlock(NoCollisionBlock):
    display_name = 'Snow'
    name = 'snow'
    hardness = 0.1
    material = MCM_MAT_DIRT
    harvest_tools = (269, 273, 256, 277, 284)

@map_block(79)
class IceBlock(MapBlock):
    display_name = 'Ice'
    name = 'ice'
    hardness = 0.5
    material = MCM_MAT_ROCK

@map_block(80)
class SnowBlock(MapBlock):
    display_name = 'Snow Block'
    name = 'snowBlock'
    hardness = 0.2
    material = MCM_MAT_DIRT
    harvest_tools = (269, 273, 256, 277, 284)

@map_block(81)
class CactusBlock(MapBlock):
    display_name = 'Cactus'
    name = 'cactus'
    hardness = 0.4

@map_block(82)
class ClayBlock(MapBlock):
    display_name = 'Clay'
    name = 'clay'
    hardness = 0.6
    material = MCM_MAT_DIRT

@map_block(83)
class ReedsBlock(NoCollisionBlock):
    display_name = 'Sugar cane'
    name = 'reeds'

@map_block(84)
class JukeboxBlock(MapBlock):
    display_name = 'Jukebox'
    name = 'jukebox'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(85)
class WoodfenceBlock(FenceBlock):
    display_name = 'Fence'
    name = 'fence'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(86)
class PumpkinBlock(MapBlock):
    display_name = 'Pumpkin'
    name = 'pumpkin'
    hardness = 1
    material = 'plant'

@map_block(87)
class NetherrackBlock(MapBlock):
    display_name = 'Netherrack'
    name = 'hellrock'
    hardness = 0.4
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(88)
class SoulsandBlock(MapBlock):
    display_name = 'Soul Sand'
    name = 'hellsand'
    hardness = 0.5
    material = MCM_MAT_DIRT

@map_block(89)
class GlowstoneBlock(MapBlock):
    display_name = 'Glowstone'
    name = 'lightgem'
    hardness = 0.3

@map_block(90)
class PortalBlock(NoCollisionBlock):
    display_name = 'Portal'
    name = 'portal'
    hardness = None
    diggable = False

@map_block(91)
class JackBlock(MapBlock):
    display_name = 'Jack \'o\' Lantern'
    name = 'litpumpkin'
    hardness = 1
    material = 'plant'

@map_block(92)
class CakeBlock(MapBlock):
    display_name = 'Cake'
    name = 'cake'
    hardness = 0.5
    stack_size = 1

@map_block(93)
class RedstonerepoffBlock(NoCollisionBlock):
    display_name = 'Redstone Repeater (Inactive)'
    name = 'redstoneRepeaterInactive'

@map_block(94)
class RedstonereponBlock(NoCollisionBlock):
    display_name = 'Redstone Repeater (Active)'
    name = 'redstoneRepeaterActive'

@map_block(95)
class LockedchestBlock(MapBlock):
    display_name = 'Locked chest'
    name = 'lockedchest'

@map_block(96)
class OaktrapdoorBlock(TrapdoorBlock):
    display_name = 'Trapdoor'
    name = 'trapdoor'
    hardness = 3
    material = MCM_MAT_WOOD

@map_block(97)
class MonstereggBlock(MapBlock):
    display_name = 'Monster Egg'
    name = 'monsterStoneEgg'
    hardness = 0.75

@map_block(98)
class StonebrickBlock(MapBlock):
    display_name = 'Stone Brick'
    name = 'stonebricksmooth'
    hardness = 1.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(99)
class HugebrownshroomBlock(MapBlock):
    display_name = 'Huge Brown Mushroom'
    name = 'mushroomHugeBrown'
    hardness = 0.2
    material = MCM_MAT_WOOD

@map_block(100)
class HugeredshroomBlock(MapBlock):
    display_name = 'Huge Red Mushroom'
    name = 'mushroomHugeRed'
    hardness = 0.2
    material = MCM_MAT_WOOD

@map_block(101)
class IronfenceBlock(FenceBlock):
    display_name = 'Iron Bars'
    name = 'fenceIron'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(102)
class GlasspaneBlock(MapBlock):
    display_name = 'Glass Pane'
    name = 'glassPane'
    hardness = 0.3

@map_block(103)
class MelonBlock(MapBlock):
    display_name = 'Melon'
    name = 'melon'
    hardness = 1
    material = 'melon'

@map_block(104)
class PumpkinstemBlock(NoCollisionBlock):
    display_name = 'Pumpkin Stem'
    name = 'pumpkinStem'

@map_block(105)
class MelonstemBlock(NoCollisionBlock):
    display_name = 'Melon Stem'
    name = 'melonStem'

@map_block(106)
class VinesBlock(NoCollisionBlock):
    display_name = 'Vines'
    name = 'vine'
    hardness = 0.2
    material = MCM_MAT_VINE

@map_block(107)
class WoodfencegateBlock(GateBlock):
    display_name = 'Fence Gate'
    name = 'fenceGate'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(108)
class BrickstairBlock(StairBlock):
    display_name = 'Brick Stairs'
    name = 'stairsBrick'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(109)
class StonebrickstairBlock(StairBlock):
    display_name = 'Stone Brick Stairs'
    name = 'stairsStoneBrickSmooth'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(110)
class MyceliumBlock(MapBlock):
    display_name = 'Mycelium'
    name = 'mycel'
    hardness = 0.6
    material = MCM_MAT_DIRT

@map_block(111)
class LilypadBlock(MapBlock):
    display_name = 'Lily Pad'
    name = 'waterlily'

    def __init__(self, meta):
        self.bounding_box = BoundingBox(1, 0.2, 1)

@map_block(112)
class NetherbrickBlock(MapBlock):
    display_name = 'Nether Brick'
    name = 'netherBrick'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(113)
class NetherbrickfenceBlock(FenceBlock):
    display_name = 'Nether Brick Fence'
    name = 'netherFence'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(114)
class NetherbrickstairBlock(StairBlock):
    display_name = 'Nether Brick Stairs'
    name = 'stairsNetherBrick'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(115)
class NetherwartBlock(NoCollisionBlock):
    display_name = 'Nether Wart'
    name = 'netherStalk'

@map_block(116)
class EnchantmentBlock(MapBlock):
    display_name = 'Enchantment Table'
    name = 'enchantmentTable'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(117)
class BrewingBlock(MapBlock):
    display_name = 'Brewing Stand'
    name = 'brewingStand'
    hardness = 0.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(118)
class CauldronBlock(MapBlock):
    display_name = 'Cauldron'
    name = 'cauldron'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(119)
class EndportalBlock(NoCollisionBlock):
    display_name = 'End Portal'
    name = 'endPortal'
    hardness = None
    diggable = False

@map_block(120)
class EndportalframeBlock(MapBlock):
    display_name = 'End Portal Frame'
    name = 'endPortalFrame'
    hardness = None
    diggable = False

@map_block(121)
class EndstoneBlock(MapBlock):
    display_name = 'End Stone'
    name = 'whiteStone'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(122)
class DragoneggBlock(MapBlock):
    display_name = 'Dragon Egg'
    name = 'dragonEgg'
    hardness = 3

@map_block(123)
class RedstonelampoffBlock(MapBlock):
    display_name = 'Redstone Lamp (Inactive)'
    name = 'redstoneLightInactive'
    hardness = 0.3

@map_block(124)
class RedstonelamponBlock(MapBlock):
    display_name = 'Redstone Lamp (Active)'
    name = 'redstoneLightActive'
    hardness = 0.3

@map_block(125)
class WooddoubleslabBlock(MapBlock):
    display_name = 'Wooden Double Slab'
    name = 'woodSlabDouble'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(126)
class WoodslabBlock(SlabBlock):
    display_name = 'Wooden Slab'
    name = 'woodSlab'
    hardness = 2

@map_block(127)
class CocoapodBlock(MapBlock):
    display_name = 'Cocoa Pod'
    name = 'cocoa'
    hardness = 0.2
    material = 'plant'

@map_block(128)
class SandstonestairBlock(StairBlock):
    display_name = 'Sandstone Stairs'
    name = 'stairsSandStone'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(129)
class EmeraldoreBlock(MapBlock):
    display_name = 'Emerald Ore'
    name = 'oreEmerald'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(130)
class EnderchestBlock(MapBlock):
    display_name = 'Ender Chest'
    name = 'enderChest'
    hardness = 22.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(131)
class TripwirehookBlock(NoCollisionBlock):
    display_name = 'Tripwire Hook'
    name = 'tripWireSource'

@map_block(132)
class TripwireBlock(NoCollisionBlock):
    display_name = 'Tripwire'
    name = 'tripWire'

@map_block(133)
class EmeraldBlock(MapBlock):
    display_name = 'Block of Emerald'
    name = 'blockEmerald'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (257, 278)

@map_block(134)
class SprucestairBlock(StairBlock):
    display_name = 'Spruce Wood Stairs'
    name = 'stairsWoodSpruce'

@map_block(135)
class BirchstairBlock(StairBlock):
    display_name = 'Birch Wood Stairs'
    name = 'stairsWoodBirch'

@map_block(136)
class JunglestairBlock(StairBlock):
    display_name = 'Jungle Wood Stairs'
    name = 'stairsWoodJungle'

@map_block(137)
class CommandBlock(MapBlock):
    display_name = 'Command Block'
    name = 'commandBlock'
    hardness = None
    diggable = False
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(138)
class BeaconBlock(MapBlock):
    display_name = 'Beacon'
    name = 'beacon'
    hardness = 3

@map_block(139)
class CobblewallBlock(FenceBlock):
    display_name = 'Cobblestone Wall'
    name = 'cobbleWall'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(140)
class FlowerpotBlock(MapBlock):
    display_name = 'Flower Pot'
    name = 'flowerPot'

@map_block(141)
class CarrotBlock(NoCollisionBlock):
    display_name = 'Carrots'
    name = 'carrots'

@map_block(142)
class PotatoBlock(NoCollisionBlock):
    display_name = 'Potatoes'
    name = 'potatoes'

@map_block(143)
class WoodbuttonBlock(NoCollisionBlock):
    display_name = 'Wooden Button'
    name = 'buttonWood'
    hardness = 0.5

@map_block(144)
class MobheadBlock(MapBlock):
    display_name = 'Mob Head'
    name = 'skull'
    hardness = 1

@map_block(145)
class AnvilBlock(MapBlock):
    display_name = 'Anvil'
    name = 'anvil'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(146)
class TrappedchestBlock(MapBlock):
    display_name = 'Trapped Chest'
    name = 'trappedChest'
    hardness = 2.5
    material = MCM_MAT_WOOD

@map_block(147)
class WeightedplatelightBlock(NoCollisionBlock):
    display_name = 'Weighted Pressure plate (Light)'
    name = 'pressurePlateWeightedLight'
    hardness = 0.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(148)
class WeightedplateheavyBlock(NoCollisionBlock):
    display_name = 'Weighted Pressure plate (Heavy)'
    name = 'pressurePlateWeightedHeavy'
    hardness = 0.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(149)
class ComparatoroffBlock(NoCollisionBlock):
    display_name = 'Redstone Comparator (Inactive)'
    name = 'redstoneComparatorInactive'

@map_block(150)
class ComparatoronBlock(NoCollisionBlock):
    display_name = 'Redstone Comparator (Active)'
    name = 'redstoneComparatorActive'

@map_block(151)
class LightsensorBlock(MapBlock):
    display_name = 'Daylight Sensor'
    name = 'daylightSensor'
    hardness = 0.2
    material = MCM_MAT_WOOD

@map_block(152)
class RedstoneBlock(MapBlock):
    display_name = 'Block of Redstone'
    name = 'redstoneBlock'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(153)
class NetherquartzoreBlock(MapBlock):
    display_name = 'Nether Quartz Ore'
    name = 'netherQuartzOre'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(154)
class HopperBlock(MapBlock):
    display_name = 'Hopper'
    name = 'hopper'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(155)
class QuartzBlock(MapBlock):
    display_name = 'Block of Quartz'
    name = 'quartzBlock'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(156)
class QuartzstairBlock(StairBlock):
    display_name = 'Quartz Stairs'
    name = 'quartzStairs'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(157)
class ActivatorrailBlock(NoCollisionBlock):
    display_name = 'Activator Rail'
    name = 'activatorRail'
    hardness = 0.7
    material = MCM_MAT_ROCK

@map_block(158)
class DropperBlock(MapBlock):
    display_name = 'Dropper'
    name = 'dropper'
    hardness = 3.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(159)
class StainedclayBlock(MapBlock):
    display_name = 'Stained Clay'
    name = 'stainedClay'
    hardness = 1.25
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(160)
class StainedglasspaneBlock(MapBlock):
    display_name = 'Stained Glass Pane'
    name = 'stainedGlassPane'
    hardness = 0.3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(161)
class AcacialeavesBlock(MapBlock):
    display_name = 'Acacia Leaves'
    name = 'acaciaLeaves'
    hardness = 0.2
    material = MCM_MAT_LEAVES

@map_block(162)
class AcaciawoodBlock(MapBlock):
    display_name = 'Acacia Wood'
    name = 'acaciaWood'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(163)
class AcaciastairBlock(StairBlock):
    display_name = 'Acacia Stairs'
    name = 'acaciaStairs'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(164)
class DarkoakstairBlock(StairBlock):
    display_name = 'Dark Oak Stairs'
    name = 'darkoakStairs'
    hardness = 2
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(165)
class SlimeBlock(MapBlock):
    display_name = 'Slime'
    name = 'slime'
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(166)
class BarrierBlock(MapBlock):
    display_name = 'Barrier'
    name = 'barrier'
    hardness = None
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(167)
class IrontrapdoorBlock(TrapdoorBlock):
    display_name = 'Iron Trapdoor'
    name = 'ironTrapdoor'
    hardness = 3
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(168)
class PrismarineBlock(MapBlock):
    display_name = 'Prismarine'
    name = 'prismarine'
    hardness = 1.5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(169)
class SealanternBlock(MapBlock):
    display_name = 'Sea Lantern'
    name = 'seaLantern'
    hardness = 0.3
    material = MCM_MAT_ROCK

@map_block(170)
class HaybaleBlock(MapBlock):
    display_name = 'Hay Bale'
    name = 'haybale'
    hardness = 0.5
    material = MCM_MAT_ROCK

@map_block(171)
class CarpetBlock(NoCollisionBlock):
    display_name = 'Carpet'
    name = 'carpet'
    material = MCM_MAT_WOOL

@map_block(172)
class HardenedclayBlock(MapBlock):
    display_name = 'Hardened Clay'
    name = 'hardenedClay'
    hardness = 1.25
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(173)
class CoalBlock(MapBlock):
    display_name = 'Coal'
    name = 'coal'
    hardness = 5
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(174)
class PackediceBlock(MapBlock):
    display_name = 'Packed Ice'
    name = 'packedIce'
    hardness = 0.5
    material = MCM_MAT_ROCK

@map_block(175)
class SunflowerBlock(NoCollisionBlock):
    display_name = 'Sunflower'
    name = 'sunflower'
    material = MCM_MAT_ROCK

@map_block(176)
class BannerfreeBlock(NoCollisionBlock):
    display_name = 'Free Standing Banner'
    name = 'bannerFree'
    hardness = 1
    stack_size = 1
    material = MCM_MAT_ROCK

@map_block(177)
class BannerwallBlock(NoCollisionBlock):
    display_name = 'Wall Mounted Banner'
    name = 'bannerWall'
    hardness = 1
    stack_size = 1
    material = MCM_MAT_ROCK

@map_block(178)
class LightsensorinvertedBlock(MapBlock):
    display_name = 'Inverted Daylight Sensor'
    name = 'daylightSensorInverted'
    hardness = 0.2
    material = MCM_MAT_WOOD

@map_block(179)
class RedsandstoneBlock(MapBlock):
    display_name = 'Red Sandstone'
    name = 'redSandstone'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(180)
class RedsandstonestairBlock(StairBlock):
    display_name = 'Red Sandstone Stairs'
    name = 'redSandstoneStairs'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(181)
class RedsandstonedoubleslabBlock(MapBlock):
    display_name = 'Red Sandstone Double Slab'
    name = 'redSandstoneDoubleSlab'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(182)
class RedsandstoneslabBlock(SlabBlock):
    display_name = 'Red Sandstone Slab'
    name = 'redSandstoneSlab'
    hardness = 0.8
    material = MCM_MAT_ROCK
    harvest_tools = (270, 274, 257, 278, 285)

@map_block(183)
class FencegatespruceBlock(GateBlock):
    display_name = 'Spruce Fence Gate'
    name = 'fenceGateSpruce'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(184)
class FencegatebirchBlock(GateBlock):
    display_name = 'Birch Fence Gate'
    name = 'fenceGateBirch'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(185)
class FencegatejungleBlock(GateBlock):
    display_name = 'Jungle Fence Gate'
    name = 'fenceGateJungle'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(186)
class FencegatedarkoakBlock(GateBlock):
    display_name = 'Dark Oak Fence Gate'
    name = 'fenceGateDarkOak'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(187)
class FencegateacaciaBlock(GateBlock):
    display_name = 'Acacia Fence Gate'
    name = 'fenceGateAcacia'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(188)
class FencespruceBlock(FenceBlock):
    display_name = 'Spruce Fence'
    name = 'fenceSpruce'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(189)
class FencebirchBlock(FenceBlock):
    display_name = 'Birch Fence'
    name = 'fenceBirch'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(190)
class FencejungleBlock(FenceBlock):
    display_name = 'Jungle Fence'
    name = 'fenceJungle'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(191)
class FencedarkoakBlock(FenceBlock):
    display_name = 'Dark Oak Fence'
    name = 'fenceDarkOak'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(192)
class FenceacaciaBlock(FenceBlock):
    display_name = 'Acacia Fence'
    name = 'fenceAcacia'
    hardness = 2
    material = MCM_MAT_WOOD

@map_block(193)
class DoorspruceBlock(DoorBlock):
    display_name = 'Spruce Door'
    name = 'doorSpruce'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

@map_block(194)
class DoorbirchBlock(DoorBlock):
    display_name = 'Birch Door'
    name = 'doorBirch'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

@map_block(195)
class DoorjungleBlock(DoorBlock):
    display_name = 'Jungle Door'
    name = 'DoorJungle'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

@map_block(196)
class DooracaciaBlock(DoorBlock):
    display_name = 'Acacia Door'
    name = 'doorAcacia'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

@map_block(197)
class DoordarkoakBlock(DoorBlock):
    display_name = 'Dark Oak Door'
    name = 'doorDarkOak'
    hardness = 3
    stack_size = 1
    material = MCM_MAT_WOOD

blocks = tuple(blocks[i] for i in range(len(blocks)))

biomes = {}

def map_biome(biome_id):
    def inner(cl):
        biomes[biome_id] = cl
        cl.biome_id = biome_id
        return cl
    return inner

def get_biome(biome_id):
    return biomes[biome_id]() if biome_id in biomes else None

class MapBiome(object):
    name = 'Map Biome'
    temperature = 0.0

@map_biome(0)
class OceanBiome(MapBiome):
    name = 'Ocean'
    temperature = 0.5

@map_biome(1)
class PlainsBiome(MapBiome):
    name = 'Plains'
    temperature = 0.8

@map_biome(2)
class DesertBiome(MapBiome):
    name = 'Desert'
    temperature = 2

@map_biome(3)
class ExtremeHillsBiome(MapBiome):
    name = 'Extreme Hills'
    temperature = 0.2

@map_biome(4)
class ForestBiome(MapBiome):
    name = 'Forest'
    temperature = 0.7

@map_biome(5)
class TaigaBiome(MapBiome):
    name = 'Taiga'
    temperature = 0.05

@map_biome(6)
class SwamplandBiome(MapBiome):
    name = 'Swampland'
    temperature = 0.8

@map_biome(7)
class RiverBiome(MapBiome):
    name = 'River'
    temperature = 0.5

@map_biome(8)
class HellBiome(MapBiome):
    name = 'Hell'
    temperature = 2

@map_biome(9)
class SkyBiome(MapBiome):
    name = 'Sky'
    temperature = 0.5

@map_biome(10)
class FrozenOceanBiome(MapBiome):
    name = 'Frozen Ocean'
    temperature = 0

@map_biome(11)
class FrozenRiverBiome(MapBiome):
    name = 'Frozen River'
    temperature = 0

@map_biome(12)
class IcePlainsBiome(MapBiome):
    name = 'Ice Plains'
    temperature = 0

@map_biome(13)
class IceMountainsBiome(MapBiome):
    name = 'Ice Mountains'
    temperature = 0

@map_biome(14)
class MushroomIslandBiome(MapBiome):
    name = 'Mushroom Island'
    temperature = 0.9

@map_biome(15)
class MushroomIslandShoreBiome(MapBiome):
    name = 'Mushroom Island Shore'
    temperature = 0.9

@map_biome(16)
class BeachBiome(MapBiome):
    name = 'Beach'
    temperature = 0.8

@map_biome(17)
class DesertHillsBiome(MapBiome):
    name = 'Desert Hills'
    temperature = 2

@map_biome(18)
class ForestHillsBiome(MapBiome):
    name = 'Forest Hills'
    temperature = 0.7

@map_biome(19)
class TaigaHillsBiome(MapBiome):
    name = 'Taiga Hills'
    temperature = 0.05

@map_biome(20)
class ExtremeTaigaHillsEdgeBiome(MapBiome):
    name = 'Extreme Hills Edge'
    temperature = 0.2

@map_biome(21)
class JungleBiome(MapBiome):
    name = 'Jungle'
    temperature = 1.2

@map_biome(22)
class JungleHillsBiome(MapBiome):
    name = 'Jungle Hills'
    temperature = 1.2

@map_biome(23)
class JungleEdgeBiome(MapBiome):
    name = 'Jungle Edge'
    temperature = 0.95

@map_biome(24)
class DeepOceanBiome(MapBiome):
    name = 'Deep Ocean'
    temperature = 0.5

@map_biome(25)
class StoneBeachBiome(MapBiome):
    name = 'Stone Beach'
    temperature = 0.2

@map_biome(26)
class ColdBeachBiome(MapBiome):
    name = 'Cold Beach'
    temperature = 0

@map_biome(27)
class BirchForestBiome(MapBiome):
    name = 'Birch Forest'
    temperature = 0.6

@map_biome(28)
class BirchForestHillsBiome(MapBiome):
    name = 'Birch Forest Hills'
    temperature = 0.6

@map_biome(29)
class RoofedForestBiome(MapBiome):
    name = 'Roofed Forest'
    temperature = 0.7

@map_biome(30)
class ColdTaigaBiome(MapBiome):
    name = 'Cold Taiga'
    temperature = 0

@map_biome(31)
class ColdTaigaHillsBiome(MapBiome):
    name = 'Cold Taiga Hills'
    temperature = 0

@map_biome(32)
class MegaTaigaBiome(MapBiome):
    name = 'Mega Taiga'
    temperature = 0.3

@map_biome(33)
class MegaTaigaHillsBiome(MapBiome):
    name = 'Mega Taiga Hills'
    temperature = 0.3

@map_biome(34)
class ExtremeHillsPlusBiome(MapBiome):
    name = 'Extreme Hills+'
    temperature = 0.2

@map_biome(35)
class SavannaBiome(MapBiome):
    name = 'Savanna'
    temperature = 1.0

@map_biome(36)
class SavannaPlateauBiome(MapBiome):
    name = 'Savanna Plateau'
    temperature = 1.0

@map_biome(37)
class MesaBiome(MapBiome):
    name = 'Mesa'
    temperature = 1.0

@map_biome(38)
class MesaPlateauFBiome(MapBiome):
    name = 'Mesa Plateau F'
    temperature = 1.0

@map_biome(39)
class MesaPlateauBiome(MapBiome):
    name = 'Mesa Plateau'
    temperature = 1.0

@map_biome(129)
class SunflowerPlainsBiome(MapBiome):
    name = 'Sunflower Plains'
    temperature = 0.8

@map_biome(130)
class DesertMBiome(MapBiome):
    name = 'Desert M'
    temperature = 2

@map_biome(131)
class ExtremeHillsMBiome(MapBiome):
    name = 'Extreme Hills M'
    temperature = 0.2

@map_biome(132)
class FlowerForestBiome(MapBiome):
    name = 'Flower Forest'
    temperature = 0.7

@map_biome(133)
class TaigaMBiome(MapBiome):
    name = 'Taiga M'
    temperature = 0.25

@map_biome(134)
class SwamplandMBiome(MapBiome):
    name = 'Swampland M'
    temperature = 0.8

@map_biome(140)
class IcePlainsSpikesBiome(MapBiome):
    name = 'Ice Plains Spikes'
    temperature = 0

@map_biome(149)
class JungleMBiome(MapBiome):
    name = 'Jungle M'
    temperature = 0.95

@map_biome(151)
class JungleEdgeMBiome(MapBiome):
    name = 'Jungle Edge M'
    temperature = 0.95

@map_biome(155)
class BirchForestMBiome(MapBiome):
    name = 'Birch Forest M'
    temperature = 0.6

@map_biome(156)
class BirchForestHillsMBiome(MapBiome):
    name = 'Birch Forest Hills M'
    temperature = 0.6

@map_biome(157)
class RoofedForestMBiome(MapBiome):
    name = 'Roofed Forest M'
    temperature = 0.7

@map_biome(158)
class ColdTaigaMBiome(MapBiome):
    name = 'Cold Taiga M'
    temperature = 0

@map_biome(160)
class MegaSpruceTaigaBiome(MapBiome):
    name = 'Mega Spruce Taiga'
    temperature = 0.25

@map_biome(161)
class MegaSpruceTaigaHillsBiome(MapBiome):
    name = 'Mega Spruce Taiga Hills'
    temperature = 0.25

@map_biome(162)
class ExtremeHillsPlusMBiome(MapBiome):
    name = 'Extreme Hills+ M'
    temperature = 0.2

@map_biome(163)
class SavannaMBiome(MapBiome):
    name = 'Savanna M'
    temperature = 1.0

@map_biome(164)
class SavannaPlateauMBiome(MapBiome):
    name = 'Savanna Plateau M'
    temperature = 1.0

@map_biome(165)
class MesaBRyceBiome(MapBiome):
    name = 'Mesa (Bryce)'
    temperature = 1.0

@map_biome(166)
class MesaPlateauFMBiome(MapBiome):
    name = 'Mesa Plateau F M'
    temperature = 1.0

@map_biome(167)
class MesaPlateauMBiome(MapBiome):
    name = 'Mesa Plateau M'
    temperature = 1.0
mit
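The module above is consumed through get_block() and get_biome(). A minimal usage sketch follows; it is not part of the dataset row, and the spock.mcmap.mapdata import path is an assumption made for illustration.

from spock.mcmap import mapdata  # assumed module path for the file above

# Wooden door (id 64) with meta 0x04: lower half, open, so no collision box.
door = mapdata.get_block(64, meta=0x04)
print(door.display_name, door.bounding_box)  # 'Wooden Door' None

# Biome lookup returns an instance carrying name and temperature attributes.
plains = mapdata.get_biome(1)
print(plains.name, plains.temperature)  # 'Plains' 0.8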
mbaijal/incubator-mxnet
tests/python/unittest/test_gluon_contrib.py
8
10138
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import print_function
import mxnet as mx
from mxnet.gluon import contrib
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import Concurrent, HybridConcurrent, Identity, SparseEmbedding
from mxnet.test_utils import almost_equal
from common import setup_module, with_seed, teardown
import numpy as np
from numpy.testing import assert_allclose


def check_rnn_cell(cell, prefix, in_shape=(10, 50), out_shape=(10, 100), begin_state=None):
    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
    outputs, _ = cell.unroll(3, inputs, begin_state=begin_state)
    outputs = mx.sym.Group(outputs)
    assert sorted(cell.collect_params().keys()) == [prefix+'h2h_bias', prefix+'h2h_weight',
                                                    prefix+'i2h_bias', prefix+'i2h_weight']
    assert outputs.list_outputs() == [prefix+'t0_out_output', prefix+'t1_out_output', prefix+'t2_out_output']

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=in_shape,
                                           rnn_t1_data=in_shape,
                                           rnn_t2_data=in_shape)
    assert outs == [out_shape]*3


def check_rnn_forward(layer, inputs):
    inputs.attach_grad()
    layer.collect_params().initialize()
    with mx.autograd.record():
        layer.unroll(3, inputs, merge_outputs=True)[0].backward()
        mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
    mx.nd.waitall()


@with_seed()
def test_rnn_cells():
    check_rnn_forward(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)),
                      mx.nd.ones((8, 3, 5, 7)))
    check_rnn_forward(contrib.rnn.Conv1DRNNCell((5, 7), 10, (3,), (3,)),
                      mx.nd.ones((8, 3, 5, 7)))
    check_rnn_forward(contrib.rnn.Conv1DGRUCell((5, 7), 10, (3,), (3,)),
                      mx.nd.ones((8, 3, 5, 7)))

    net = mx.gluon.rnn.SequentialRNNCell()
    net.add(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)))
    net.add(contrib.rnn.Conv1DRNNCell((10, 5), 11, (3,), (3,)))
    net.add(contrib.rnn.Conv1DGRUCell((11, 3), 12, (3,), (3,)))
    check_rnn_forward(net, mx.nd.ones((8, 3, 5, 7)))


@with_seed()
def test_convrnn():
    cell = contrib.rnn.Conv1DRNNCell((10, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))

    cell = contrib.rnn.Conv2DRNNCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))

    cell = contrib.rnn.Conv3DRNNCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))


@with_seed()
def test_convlstm():
    cell = contrib.rnn.Conv1DLSTMCell((10, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))

    cell = contrib.rnn.Conv2DLSTMCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))

    cell = contrib.rnn.Conv3DLSTMCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))


@with_seed()
def test_convgru():
    cell = contrib.rnn.Conv1DGRUCell((10, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))

    cell = contrib.rnn.Conv2DGRUCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))

    cell = contrib.rnn.Conv3DGRUCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
    check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))


@with_seed()
def test_conv_fill_shape():
    cell = contrib.rnn.Conv1DLSTMCell((0, 7), 10, (3,), (3,))
    cell.hybridize()
    check_rnn_forward(cell, mx.nd.ones((8, 3, 5, 7)))
    assert cell.i2h_weight.shape[1] == 5, cell.i2h_weight.shape[1]


@with_seed()
def test_lstmp():
    nhid = 100
    nproj = 64
    cell = contrib.rnn.LSTMPCell(nhid, nproj, prefix='rnn_')
    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
    outputs, _ = cell.unroll(3, inputs)
    outputs = mx.sym.Group(outputs)
    expected_params = ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_h2r_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    expected_outputs = ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
    assert sorted(cell.collect_params().keys()) == expected_params
    assert outputs.list_outputs() == expected_outputs, outputs.list_outputs()

    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
    assert outs == [(10, nproj), (10, nproj), (10, nproj)]


@with_seed()
def test_vardrop():
    def check_vardrop(drop_inputs, drop_states, drop_outputs):
        cell = contrib.rnn.VariationalDropoutCell(mx.gluon.rnn.RNNCell(100, prefix='rnn_'),
                                                  drop_outputs=drop_outputs,
                                                  drop_states=drop_states,
                                                  drop_inputs=drop_inputs)
        cell.collect_params().initialize(init='xavier')
        input_data = mx.nd.random_uniform(shape=(10, 3, 50), ctx=mx.context.current_context())
        with mx.autograd.record():
            outputs1, _ = cell.unroll(3, input_data, merge_outputs=True)
            mx.nd.waitall()
            outputs2, _ = cell.unroll(3, input_data, merge_outputs=True)
        assert not almost_equal(outputs1.asnumpy(), outputs2.asnumpy())

        inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
        outputs, _ = cell.unroll(3, inputs, merge_outputs=False)
        outputs = mx.sym.Group(outputs)

        args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
        assert outs == [(10, 100), (10, 100), (10, 100)]

        cell.reset()
        cell.hybridize()
        with mx.autograd.record():
            outputs3, _ = cell.unroll(3, input_data, merge_outputs=True)
            mx.nd.waitall()
            outputs4, _ = cell.unroll(3, input_data, merge_outputs=True)
        assert not almost_equal(outputs3.asnumpy(), outputs4.asnumpy())
        assert not almost_equal(outputs1.asnumpy(), outputs3.asnumpy())

    check_vardrop(0.5, 0.5, 0.5)
    check_vardrop(0.5, 0, 0.5)


def test_concurrent():
    model = HybridConcurrent(axis=1)
    model.add(nn.Dense(128, activation='tanh', in_units=10))
    model.add(nn.Dense(64, activation='tanh', in_units=10))
    model.add(nn.Dense(32, in_units=10))
    model2 = Concurrent(axis=1)
    model2.add(nn.Dense(128, activation='tanh', in_units=10))
    model2.add(nn.Dense(64, activation='tanh', in_units=10))
    model2.add(nn.Dense(32, in_units=10))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.initialize(mx.init.Xavier(magnitude=2.24))
    model2.initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 10)))
    x2 = model2(mx.nd.zeros((32, 10)))
    assert x.shape == (32, 224)
    assert x2.shape == (32, 224)
    x.wait_to_read()
    x2.wait_to_read()


@with_seed()
def test_identity():
    model = Identity()
    x = mx.nd.random.uniform(shape=(128, 33, 64))
    mx.test_utils.assert_almost_equal(model(x).asnumpy(), x.asnumpy())


@with_seed()
def test_sparse_embedding():
    layer = SparseEmbedding(10, 100)
    layer.initialize()
    trainer = mx.gluon.Trainer(layer.collect_params(), 'sgd')
    x = mx.nd.array([3,4,2,0,1])
    with mx.autograd.record():
        y = layer(x)
        y.backward()
    assert (layer.weight.grad().asnumpy()[:5] == 1).all()
    assert (layer.weight.grad().asnumpy()[5:] == 0).all()


def test_datasets():
    wikitext2_train = contrib.data.text.WikiText2(root='data/wikitext-2', segment='train')
    wikitext2_val = contrib.data.text.WikiText2(root='data/wikitext-2', segment='validation',
                                                vocab=wikitext2_train.vocabulary)
    wikitext2_test = contrib.data.text.WikiText2(root='data/wikitext-2', segment='test')
    assert len(wikitext2_train) == 59305, len(wikitext2_train)
    assert len(wikitext2_train.vocabulary) == 33278, len(wikitext2_train.vocabulary)
    assert len(wikitext2_train.frequencies) == 33277, len(wikitext2_train.frequencies)
    assert len(wikitext2_val) == 6181, len(wikitext2_val)
    assert len(wikitext2_val.vocabulary) == 33278, len(wikitext2_val.vocabulary)
    assert len(wikitext2_val.frequencies) == 13776, len(wikitext2_val.frequencies)
    assert len(wikitext2_test) == 6974, len(wikitext2_test)
    assert len(wikitext2_test.vocabulary) == 14143, len(wikitext2_test.vocabulary)
    assert len(wikitext2_test.frequencies) == 14142, len(wikitext2_test.frequencies)
    assert wikitext2_test.frequencies['English'] == 32


def test_sampler():
    interval_sampler = contrib.data.IntervalSampler(10, 3)
    assert sorted(list(interval_sampler)) == list(range(10))
    interval_sampler = contrib.data.IntervalSampler(10, 3, rollover=False)
    assert list(interval_sampler) == [0, 3, 6, 9]


if __name__ == '__main__':
    import nose
    nose.runmodule()
apache-2.0
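For context, here is a minimal sketch of the gluon.contrib blocks that test_concurrent and test_identity exercise above; it is not taken from the test file, and the layer sizes and shapes are illustrative assumptions.

import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity

net = HybridConcurrent(axis=1)
net.add(nn.Dense(16, activation='tanh', in_units=8))
net.add(Identity())  # passes its input through unchanged
net.initialize(mx.init.Xavier(magnitude=2.24))

x = mx.nd.zeros((4, 8))
y = net(x)  # child outputs concatenated along axis 1
print(y.shape)  # (4, 24): 16 from the Dense branch plus 8 from Identity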