column     type           range
repo_name  stringlengths  6-103
path       stringlengths  5-191
copies     stringlengths  1-4
size       stringlengths  4-6
content    stringlengths  986-970k
license    stringclasses  15 values
repo_name: justincassidy/scikit-learn
path: sklearn/tree/tests/test_export.py
copies: 130
size: 9950
content:
""" Testing for export functions of decision trees (sklearn.tree.export). """ from re import finditer from numpy.testing import assert_equal from nose.tools import assert_raises from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import export_graphviz from sklearn.externals.six import StringIO from sklearn.utils.testing import assert_in # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]] w = [1, 1, 1, .5, .5, .5] def test_graphviz_toy(): # Check correctness of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1, criterion="gini", random_state=2) clf.fit(X, y) # Test export code out = StringIO() export_graphviz(clf, out_file=out) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with feature_names out = StringIO() export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with class_names out = StringIO() export_graphviz(clf, out_file=out, class_names=["yes", "no"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = yes"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \ 'class = yes"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \ 'class = no"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test plot_options out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False, proportion=True, special_characters=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'edge [fontname=helvetica] ;\n' \ '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \ 'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \ '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \ 'fillcolor="#399de5ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, class_names=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node 
[shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = y[0]"] ;\n' \ '1 [label="(...)"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth with plot_options out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, filled=True, node_ids=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \ 'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \ '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test multi-output with weighted samples clf = DecisionTreeClassifier(max_depth=2, min_samples_split=1, criterion="gini", random_state=2) clf = clf.fit(X, y2, sample_weight=w) out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \ 'value = [[3.0, 1.5, 0.0]\\n' \ '[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \ '1 [label="X[1] <= -1.5\\nsamples = 3\\n' \ 'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \ '[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \ '1 -> 2 ;\n' \ '3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \ '[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \ '1 -> 3 ;\n' \ '4 [label="X[0] <= 1.5\\nsamples = 3\\n' \ 'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 4 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \ '[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \ '4 -> 5 ;\n' \ '6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \ '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \ '4 -> 6 ;\n' \ '}' assert_equal(contents1, contents2) # Test regression output with plot_options clf = DecisionTreeRegressor(max_depth=3, min_samples_split=1, criterion="mse", random_state=2) clf.fit(X, y) out = StringIO() export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True, rotate=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'graph [ranksep=equally, splines=polyline] ;\n' \ 'edge [fontname=helvetica] ;\n' \ 'rankdir=LR ;\n' \ '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \ 'value = 0.0", fillcolor="#e581397f"] ;\n' \ '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \ 'fillcolor="#e5813900"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="True"] ;\n' \ '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="False"] ;\n' \ '{rank=same ; 0} ;\n' \ '{rank=same ; 1; 2} ;\n' \ '}' assert_equal(contents1, contents2) def test_graphviz_errors(): # Check for errors of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1) clf.fit(X, y) # Check feature_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, feature_names=[]) # Check class_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, class_names=[]) def 
test_friedman_mse_in_graphviz(): clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0) clf.fit(X, y) dot_data = StringIO() export_graphviz(clf, out_file=dot_data) clf = GradientBoostingClassifier(n_estimators=2, random_state=0) clf.fit(X, y) for estimator in clf.estimators_: export_graphviz(estimator[0], out_file=dot_data) for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()): assert_in("friedman_mse", finding.group())
license: bsd-3-clause

repo_name: justincassidy/scikit-learn
path: sklearn/utils/class_weight.py
copies: 139
size: 7206
content:
# Authors: Andreas Mueller # Manoj Kumar # License: BSD 3 clause import warnings import numpy as np from ..externals import six from ..utils.fixes import in1d from .fixes import bincount def compute_class_weight(class_weight, classes, y): """Estimate class weights for unbalanced datasets. Parameters ---------- class_weight : dict, 'balanced' or None If 'balanced', class weights will be given by ``n_samples / (n_classes * np.bincount(y))``. If a dictionary is given, keys are classes and values are corresponding class weights. If None is given, the class weights will be uniform. classes : ndarray Array of the classes occurring in the data, as given by ``np.unique(y_org)`` with ``y_org`` the original class labels. y : array-like, shape (n_samples,) Array of original class labels per sample; Returns ------- class_weight_vect : ndarray, shape (n_classes,) Array with class_weight_vect[i] the weight for i-th class References ---------- The "balanced" heuristic is inspired by Logistic Regression in Rare Events Data, King, Zen, 2001. """ # Import error caused by circular imports. from ..preprocessing import LabelEncoder if class_weight is None or len(class_weight) == 0: # uniform class weights weight = np.ones(classes.shape[0], dtype=np.float64, order='C') elif class_weight in ['auto', 'balanced']: # Find the weight of each class as present in y. le = LabelEncoder() y_ind = le.fit_transform(y) if not all(np.in1d(classes, le.classes_)): raise ValueError("classes should have valid labels that are in y") # inversely proportional to the number of samples in the class if class_weight == 'auto': recip_freq = 1. / bincount(y_ind) weight = recip_freq[le.transform(classes)] / np.mean(recip_freq) warnings.warn("The class_weight='auto' heuristic is deprecated in" " favor of a new heuristic class_weight='balanced'." " 'auto' will be removed in 0.18", DeprecationWarning) else: recip_freq = len(y) / (len(le.classes_) * bincount(y_ind).astype(np.float64)) weight = recip_freq[le.transform(classes)] else: # user-defined dictionary weight = np.ones(classes.shape[0], dtype=np.float64, order='C') if not isinstance(class_weight, dict): raise ValueError("class_weight must be dict, 'auto', or None," " got: %r" % class_weight) for c in class_weight: i = np.searchsorted(classes, c) if classes[i] != c: raise ValueError("Class label %d not present." % c) else: weight[i] = class_weight[c] return weight def compute_sample_weight(class_weight, y, indices=None): """Estimate sample weights by class for unbalanced datasets. Parameters ---------- class_weight : dict, list of dicts, "balanced", or None, optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data: ``n_samples / (n_classes * np.bincount(y))``. For multi-output, the weights of each column of y will be multiplied. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Array of original class labels per sample. indices : array-like, shape (n_subsample,), or None Array of indices to be used in a subsample. Can be of length less than n_samples in the case of a subsample, or equal to n_samples in the case of a bootstrap subsample with repeated indices. If None, the sample weight will be calculated over the full sample. 
Only "auto" is supported for class_weight if this is provided. Returns ------- sample_weight_vect : ndarray, shape (n_samples,) Array with sample weights as applied to the original y """ y = np.atleast_1d(y) if y.ndim == 1: y = np.reshape(y, (-1, 1)) n_outputs = y.shape[1] if isinstance(class_weight, six.string_types): if class_weight not in ['balanced', 'auto']: raise ValueError('The only valid preset for class_weight is ' '"balanced". Given "%s".' % class_weight) elif (indices is not None and not isinstance(class_weight, six.string_types)): raise ValueError('The only valid class_weight for subsampling is ' '"balanced". Given "%s".' % class_weight) elif n_outputs > 1: if (not hasattr(class_weight, "__iter__") or isinstance(class_weight, dict)): raise ValueError("For multi-output, class_weight should be a " "list of dicts, or a valid string.") if len(class_weight) != n_outputs: raise ValueError("For multi-output, number of elements in " "class_weight should match number of outputs.") expanded_class_weight = [] for k in range(n_outputs): y_full = y[:, k] classes_full = np.unique(y_full) classes_missing = None if class_weight in ['balanced', 'auto'] or n_outputs == 1: class_weight_k = class_weight else: class_weight_k = class_weight[k] if indices is not None: # Get class weights for the subsample, covering all classes in # case some labels that were present in the original data are # missing from the sample. y_subsample = y[indices, k] classes_subsample = np.unique(y_subsample) weight_k = np.choose(np.searchsorted(classes_subsample, classes_full), compute_class_weight(class_weight_k, classes_subsample, y_subsample), mode='clip') classes_missing = set(classes_full) - set(classes_subsample) else: weight_k = compute_class_weight(class_weight_k, classes_full, y_full) weight_k = weight_k[np.searchsorted(classes_full, y_full)] if classes_missing: # Make missing classes' weight zero weight_k[in1d(y_full, list(classes_missing))] = 0. expanded_class_weight.append(weight_k) expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64) return expanded_class_weight
license: bsd-3-clause

repo_name: PiscesDream/Ideas
path: ML/co-evaluate/ann.py
copies: 1
size: 7314
content:
''' update: 2014/09/03: softmax in the last layer special: plot, plot_interval ''' import theano import theano.tensor as T import gzip import cPickle import numpy import time class HiddenLayer(object): def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh): self.input = input if W is None: W_values = numpy.asarray(rng.uniform( low=-numpy.sqrt(6. / (n_in + n_out)), high=numpy.sqrt(6. / (n_in + n_out)), size=(n_in, n_out)), dtype=theano.config.floatX) if activation == theano.tensor.nnet.sigmoid: W_values *= 4 W = theano.shared(value=W_values, name='W', borrow=True) if b is None: b_values = numpy.zeros((n_out,), dtype=theano.config.floatX) b = theano.shared(value=b_values, name='b', borrow=True) self.W = W self.b = b lin_output = T.dot(input, self.W) + self.b self.output = (lin_output if activation is None else activation(lin_output)) # parameters of the model self.params = [self.W, self.b] class ANN(object): def __init__(self, n_in, n_out, lmbd = 0.01, hiddens = [10]): x = T.matrix('x') y = T.ivector('y') lr = T.scalar('lr') rng = numpy.random.RandomState(numpy.random.randint(2 ** 30)) params = [] hid_layers = [] L2 = .0 n_hid = hiddens + [n_out] for ind, ele in enumerate(n_hid): if ind == 0: input = x n_in = n_in else: input = hid_layers[-1].output n_in = n_hid[ind-1] if ind == len(n_hid) - 1: activation = T.nnet.softmax else: activation = T.nnet.sigmoid layer = HiddenLayer(rng, input = input, n_in = n_in, n_out = ele, activation = activation) hid_layers.append( layer) L2 += T.sum(layer.W ** 2) params.extend([layer.W, layer.b]) nl = -T.mean(T.log(hid_layers[-1].output)[T.arange(y.shape[0]), y]) cost = nl + L2 * lmbd grads = T.grad(cost, params) updates = [] for param_i, grad_i in zip(params, grads): updates.append((param_i, param_i - lr * grad_i)) y_pred = T.argmax(hid_layers[-1].output, 1) errors = T.mean(T.neq(y_pred, y)) self.n_in = n_in self.n_out = n_out self.hiddens = hiddens self.x = x self.y = y self.lr = lr self.cost = cost self.errors = errors self.updates = updates #self.pred = y_pred self.time = [] self.hid_layers = hid_layers def fit(self, datasets, batch_size = 500, n_epochs = 200, lr = 0.01, plot = None, plot_interval = None): ''' without validation''' index = T.lscalar() train_set_x, train_set_y = datasets[0] test_set_x, test_set_y = datasets[1] try: n_train_batches = train_set_x.get_value(borrow=True).shape[0] n_test_batches = test_set_x.get_value(borrow=True).shape[0] except: n_train_batches = train_set_x.shape[0] n_test_batches = test_set_x.shape[0] n_train_batches /= batch_size n_test_batches /= batch_size train_model = theano.function([index], self.cost, updates = self.updates, givens = { self.x: train_set_x[index * batch_size: (index + 1) * batch_size], self.y: train_set_y[index * batch_size: (index + 1) * batch_size], self.lr: lr}) test_model = theano.function([], self.errors, givens = { self.x: test_set_x, self.y: test_set_y}) debug_f = theano.function([index], self.errors, givens = { self.x: test_set_x[index * batch_size : (index+1) * batch_size], self.y: test_set_y[index * batch_size : (index+1) * batch_size]}) # print numpy.mean([debug_f(i) for i in xrange(n_test_batches)]) print(test_model()) print '...training' maxiter = n_epochs iteration = 0 while iteration < maxiter: start_time = time.time() iteration += 1 print 'iteration %d' % iteration for minibatch_index in xrange(n_train_batches): print '\tL of (%03d/%03d) = %f\r' % (minibatch_index, n_train_batches, train_model(minibatch_index)), print '' print 'error = %f (size=%d)' % 
(test_model(), test_set_y.shape[0].eval()) self.time.append(time.time()-start_time) if plot: if iteration % plot_interval == 0: plot(self, iteration) def __repr__(self): return '<CNN: %r; HID: %r>' % (self.nkerns, self.nhiddens) def pred(self, x): return theano.function([], T.argmax(self.hid_layers[-1].output, 1), givens = {self.x: x})() def prob(self, x): return theano.function([], self.hid_layers[-1].output, givens = {self.x: x})() def __repr__(self): return '<ANN:%r-%r-%r>' % (self.n_in, self.hiddens, self.n_out) def get_neg_log(self, x, y): return theano.function([], -T.log(self.hid_layers[-1].output)[T.arange(self.y.shape[0]), self.y], givens={self.x:x, self.y:y})() def load_data(dataset, num = None): print '... loading data' f = gzip.open(dataset, 'rb') train_set, valid_set, test_set = cPickle.load(f) train_set = (numpy.concatenate([train_set[0], valid_set[0]], 0), numpy.concatenate([train_set[1], valid_set[1]], 0)) f.close() def shared_dataset(data_xy, borrow=True, num = None): data_x, data_y = data_xy if num: data_x = data_x[:num] data_y = data_y[:num] # data_y = boarden(10, data_y) size = int(data_x.shape[1]**.5) # data_x = data_x.reshape(data_x.shape[0], -1) print data_x.shape, data_y.shape shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow) shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow) return shared_x, T.cast(shared_y, 'int32') test_set_x, test_set_y = shared_dataset(test_set, num = num) # valid_set_x, valid_set_y = shared_dataset(valid_set, num = num) train_set_x, train_set_y = shared_dataset(train_set, num = num) rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y), (test_set_x, test_set_y)] return rval if __name__ == '__main__': theano.config.exception_verbosity='high' theano.config.on_unused_input='ignore' datasets = load_data('../../Data/mnist/mnist.pkl.gz') cl = ANN(28 * 28, 10, hiddens = [1]) cl.fit(datasets, lr = 0.1)
license: apache-2.0

repo_name: y3ah/Sentiment_Categorization
path: source/feature_extraction.py
copies: 1
size: 2145
content:
#-*- coding: utf-8 -*-
# feature_extraction.py
import codecs
import numpy
numpy.set_printoptions(threshold=numpy.inf)
from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import TfidfTransformer


# Read the corpus from a file
def feature_extraction(in_file_name):
    in_file = codecs.open(in_file_name, 'r', 'latin-1')
    # the pre-processed corpus
    corpus = []
    while True:
        # iterate over the documents
        doc = in_file.readline().strip()
        if doc == '':
            # an empty string is returned at end of file
            break
        corpus.append(doc)

    # Build the binary, tf and tf-idf feature spaces.
    # Note: the number of features kept can be chosen via CountVectorizer's
    # max_features parameter.
    max_features = None
    #max_features = 10000
    bin_vectorizer = CountVectorizer(max_features=max_features,
                                     binary=True, min_df=3)  # binary occurrence markers
    print 'calculating term occurrence feature...'
    term_occurence = bin_vectorizer.fit_transform(corpus)  # binary features

    '''
    tf_vectorizer = CountVectorizer(max_features=max_features, min_df=3)
    term_counts = tf_vectorizer.fit_transform(corpus)
    #tf = normalize(term_counts, axis=1, norm='l2')
    print 'calculating tf feature...'
    tf_transformer = TfidfTransformer(norm='l1', use_idf=False)
    tf = tf_transformer.fit_transform(term_counts)  # tf features
    '''
    '''
    print 'calculating tf-idf feature...'
    tfidf_transformer = TfidfTransformer()
    tfidf = tfidf_transformer.fit_transform(term_counts)  # tf-idf features
    '''
    return bin_vectorizer, term_occurence

# TODO:
# - filter out tags such as <num>
# - filter out low-frequency terms, e.g. via max_features or max_df
# - handle bigrams where one of the terms is a stop word
# - handle bigrams whose first term can be stemmed

# Read the class labels from a file
fin = codecs.open('../data/train_class.txt', 'r')
class_label = []
for line in fin:
    label = line.strip().split(" ")[1]
    if label == "negative":
        class_label.append(0)
    elif label == "positive":
        class_label.append(1)
class_label = numpy.array(class_label)
license: mit

repo_name: rmaestre/SVM-mapreduce
path: SVM-training.py
copies: 1
size: 2742
content:
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <codecell>

from sklearn import svm
from sklearn import cross_validation
import numpy as np
import matplotlib.pyplot as plt

# <codecell>

def array_to_file(vectors, filename):
    f_out = open(filename, "w")
    if len(vectors.shape) == 2:
        for vector in vectors:
            strings = ["%.2f" % number for number in vector]
            f_out.write("%s\n" % '\t'.join(strings))
    else:
        strings = ["%.2f" % number for number in vectors]
        f_out.write("%s\n" % '\t'.join(strings))
    f_out.close()

# <codecell>

# Load data set
file_training = "data/Training50K.csv"
# read data values
training_data = np.genfromtxt(file_training, dtype=float, skip_header=1, delimiter='\t')
# Features (data cols from 0 to 15)
X = training_data[:, range(0, 16)]
# Labels (data cols)
labels = {"y1": 15, "y2": 16, "y3": 17}
label = "y1"
y = training_data[:, labels[label]]
print("\nTraining SVM for %s label" % label)

# <codecell>

# K-Fold validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=1)
print("\nK-Fold validation:")
print("X train: %s" % str(X_train.shape))
print("y train: %s" % str(y_train.shape))
print("X test: %s" % str(X_test.shape))
print("y test: %s" % str(y_test.shape))

# <codecell>

# Classify
gamma_parameter = 0.001
# Create model
clf = svm.SVC(C=50., kernel='rbf', gamma=gamma_parameter, probability=False)
# Classify
clf.fit(X_train, y_train)
# Dump info about the model
print("\nSupport vectors length: %s" % str(clf.support_vectors_.shape))
print("Dual coef. length: %s" % str(clf.dual_coef_.shape))

# <codecell>

score = clf.score(X_test, y_test)
print("\nScore k-fold validation: %f" % score)

# <codecell>

# Save support vectors in a file
"""
array_to_file(clf.support_vectors_, "data/svm_models/%s_model_supported_vectors.tsv" % label)
array_to_file(clf.dual_coef_, "data/svm_models/%s_model_dual_coef.tsv" % label)
array_to_file(X_test, "data/svm_models/%s_model_X_test.tsv" % label)
array_to_file(y_test, "data/svm_models/%s_model_y_test.tsv" % label)
array_to_file(X_train, "data/svm_models/%s_model_X_train.tsv" % label)
array_to_file(y_train, "data/svm_models/%s_model_y_train.tsv" % label)
"""

# <codecell>

# Vector index to test (from dataset)
index = 1
vector = X[index]
print("\nVector %s Labeled: %s Model prediction: %s" % (index, y[index], clf.predict(vector)))
sum_up = 0
for i in range(0, clf.support_vectors_.shape[0]):
    sum_up = sum_up + (clf.dual_coef_[0, i] * np.linalg.norm(vector - clf.support_vectors_[i]))
print("Decision function: %s" % clf.decision_function(vector))
if sum_up < 0.0:
    print([0])
else:
    print([1])

# <codecell>
license: apache-2.0

repo_name: graingert/luigi
path: luigi/contrib/bigquery.py
copies: 18
size: 13975
content:
# -*- coding: utf-8 -*- # # Copyright 2015 Twitter Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections import logging import luigi.target import time logger = logging.getLogger('luigi-interface') try: import httplib2 import oauth2client from googleapiclient import discovery from googleapiclient import http except ImportError: logger.warning('Bigquery module imported, but google-api-python-client is ' 'not installed. Any bigquery task will fail') class CreateDisposition(object): CREATE_IF_NEEDED = 'CREATE_IF_NEEDED' CREATE_NEVER = 'CREATE_NEVER' class WriteDisposition(object): WRITE_TRUNCATE = 'WRITE_TRUNCATE' WRITE_APPEND = 'WRITE_APPEND' WRITE_EMPTY = 'WRITE_EMPTY' class QueryMode(object): INTERACTIVE = 'INTERACTIVE' BATCH = 'BATCH' class SourceFormat(object): CSV = 'CSV' DATASTORE_BACKUP = 'DATASTORE_BACKUP' NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' BQDataset = collections.namedtuple('BQDataset', 'project_id dataset_id') class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id')): @property def dataset(self): return BQDataset(project_id=self.project_id, dataset_id=self.dataset_id) class BigqueryClient(object): """A client for Google BigQuery. For details of how authentication and the descriptor work, see the documentation for the GCS client. The descriptor URL for BigQuery is https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest """ def __init__(self, oauth_credentials=None, descriptor='', http_=None): http_ = http_ or httplib2.Http() if not oauth_credentials: oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default() if descriptor: self.client = discovery.build_from_document(descriptor, credentials=oauth_credentials, http=http_) else: self.client = discovery.build('bigquery', 'v2', credentials=oauth_credentials, http=http_) def dataset_exists(self, dataset): """Returns whether the given dataset exists. :param dataset: :type dataset: BQDataset """ try: self.client.datasets().get(projectId=dataset.project_id, datasetId=dataset.dataset_id).execute() except http.HttpError as ex: if ex.resp.status == 404: return False raise return True def table_exists(self, table): """Returns whether the given table exists. :param table: :type table: BQTable """ if not self.dataset_exists(table.dataset): return False try: self.client.tables().get(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id).execute() except http.HttpError as ex: if ex.resp.status == 404: return False raise return True def make_dataset(self, dataset, raise_if_exists=False, body={}): """Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. 
:raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists """ try: self.client.datasets().insert(projectId=dataset.project_id, body=dict( {'id': '{}:{}'.format(dataset.project_id, dataset.dataset_id)}, **body)).execute() except http.HttpError as ex: if ex.resp.status == 409: if raise_if_exists: raise luigi.target.FileAlreadyExists() else: raise def delete_dataset(self, dataset, delete_nonempty=True): """Deletes a dataset (and optionally any tables in it), if it exists. :param dataset: :type dataset: BQDataset :param delete_nonempty: if true, will delete any tables before deleting the dataset """ if not self.dataset_exists(dataset): return self.client.datasets().delete(projectId=dataset.project_id, datasetId=dataset.dataset_id, deleteContents=delete_nonempty).execute() def delete_table(self, table): """Deletes a table, if it exists. :param table: :type table: BQTable """ if not self.table_exists(table): return self.client.tables().delete(projectId=table.project_id, datasetId=table.dataset_id, tableId=table.table_id).execute() def list_datasets(self, project_id): """Returns the list of datasets in a given project. :param project_id: :type project_id: str """ request = self.client.datasets().list(projectId=project_id) response = request.execute() while response is not None: for ds in response.get('datasets', []): yield ds['datasetReference']['datasetId'] request = self.client.datasets().list_next(request, response) if request is None: break response = request.execute() def list_tables(self, dataset): """Returns the list of tables in a given dataset. :param dataset: :type dataset: BQDataset """ request = self.client.tables().list(projectId=dataset.project_id, datasetId=dataset.dataset_id) response = request.execute() while response is not None: for t in response.get('tables', []): yield t['tableReference']['tableId'] request = self.client.tables().list_next(request, response) if request is None: break response = request.execute() def run_job(self, project_id, body, dataset=None): """Runs a bigquery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset """ if dataset and not self.dataset_exists(dataset): self.make_dataset(dataset) new_job = self.client.jobs().insert(projectId=project_id, body=body).execute() job_id = new_job['jobReference']['jobId'] logger.info('Started import job %s:%s', project_id, job_id) while True: status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute() if status['status']['state'] == 'DONE': if status['status'].get('errors'): raise Exception('Bigquery job failed: {}'.format(status['status']['errors'])) return logger.info('Waiting for job %s:%s to complete...', project_id, job_id) time.sleep(5.0) def copy(self, source_table, dest_table, create_disposition=CreateDisposition.CREATE_IF_NEEDED, write_disposition=WriteDisposition.WRITE_TRUNCATE): """Copies (or appends) a table to another table. 
:param source_table: :type source_table: BQTable :param dest_table: :type dest_table: BQTable :param create_disposition: whether to create the table if needed :type create_disposition: CreateDisposition :param write_disposition: whether to append/truncate/fail if the table exists :type write_disposition: WriteDisposition """ job = { "projectId": dest_table.project_id, "configuration": { "copy": { "sourceTable": { "projectId": source_table.project_id, "datasetId": source_table.dataset_id, "tableId": source_table.table_id, }, "destinationTable": { "projectId": dest_table.project_id, "datasetId": dest_table.dataset_id, "tableId": dest_table.table_id, }, "createDisposition": create_disposition, "writeDisposition": write_disposition, } } } self.run_job(dest_table.project_id, job, dataset=dest_table.dataset) class BigqueryTarget(luigi.target.Target): def __init__(self, project_id, dataset_id, table_id, client=None): self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id) self.client = client or BigqueryClient() @classmethod def from_bqtable(cls, table, client=None): """A constructor that takes a :py:class:`BQTable`. :param table: :type table: BQTable """ return cls(table.project_id, table.dataset_id, table.table_id, client=client) def exists(self): return self.client.table_exists(self.table) def __str__(self): return str(self.table) class BigqueryLoadTask(luigi.Task): """Load data into bigquery from GCS.""" @property def source_format(self): """The source format to use (see :py:class:`SourceFormat`).""" return SourceFormat.NEWLINE_DELIMITED_JSON @property def write_disposition(self): """What to do if the table already exists. By default this will fail the job. See :py:class:`WriteDisposition`""" return WriteDisposition.WRITE_EMPTY @property def schema(self): """Schema in the format defined at https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.schema. If the value is falsy, it is omitted and inferred by bigquery, which only works for CSV inputs.""" return [] @property def max_bad_records(self): return 0 @property def source_uris(self): """Source data which should be in GCS.""" return [x.path for x in luigi.task.flatten(self.input())] def run(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output) bq_client = output.client source_uris = self.source_uris() assert all(x.startswith('gs://') for x in source_uris) job = { 'projectId': output.table.project_id, 'configuration': { 'load': { 'destinationTable': { 'projectId': output.table.project_id, 'datasetId': output.table.dataset_id, 'tableId': output.table.table_id, }, 'sourceFormat': self.source_format, 'writeDisposition': self.write_disposition, 'sourceUris': source_uris, 'maxBadRecords': self.max_bad_records, } } } if self.schema: job['configuration']['load']['schema'] = {'fields': self.schema} bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset) class BigqueryRunQueryTask(luigi.Task): @property def write_disposition(self): """What to do if the table already exists. By default this will fail the job. See :py:class:`WriteDisposition`""" return WriteDisposition.WRITE_TRUNCATE @property def create_disposition(self): """Whether to create the table or not. See :py:class:`CreateDisposition`""" return CreateDisposition.CREATE_IF_NEEDED @property def query(self): """The query, in text form.""" raise NotImplementedError() @property def query_mode(self): """The query mode. 
See :py:class:`QueryMode`.""" return QueryMode.INTERACTIVE def run(self): output = self.output() assert isinstance(output, BigqueryTarget), 'Output should be a bigquery target, not %s' % (output) query = self.query assert query, 'No query was provided' bq_client = output.client logger.info('Launching Query') logger.info('Query destination: %s (%s)', output, self.write_disposition) logger.info('Query SQL: %s', query) job = { 'projectId': output.table.project_id, 'configuration': { 'query': { 'query': query, 'priority': self.query_mode, 'destinationTable': { 'projectId': output.table.project_id, 'datasetId': output.table.dataset_id, 'tableId': output.table.table_id, }, 'allowLargeResults': True, 'createDisposition': self.create_disposition, 'writeDisposition': self.write_disposition, } } } bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
license: apache-2.0

repo_name: weissercn/learningml
path: learningml/GoF/optimisation_and_evaluation/automatisation_gaussian_same_projection/automatisation_Gaussian_same_projection_optimisation_and_evaluation_euclidean.py
copies: 1
size: 3583
content:
import numpy as np
import math
import sys
import os
sys.path.insert(0, os.environ['learningml']+'/GoF/')
import classifier_eval
from classifier_eval import name_to_nclf, nclf, experiment, make_keras_model

from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from rep.estimators import XGBoostClassifier
from keras.wrappers.scikit_learn import KerasClassifier

import time

#nclf_list = [nclf()]
#nclf_list = [nclf(), name_to_nclf("bdt"), nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]]) ]
#nclf_list = [nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[1000.,0.9738])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,500])]
#nclf_list = [name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("svm"), name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("nn")]
nclf_list = [name_to_nclf("svm")]

#nclf_list = [nclf('bdt',AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2)), ['learning_rate','n_estimators'], [[0.01,2.0],[1,1000]], param_opt=[1.181, 319]), nclf('xgb',XGBoostClassifier(), ['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[524, 0.151]), nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]

systematics_fraction = 0.01

file_name_patterns = [os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_1.0_1.0_{1}_euclidean.txt",
                      os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_0.95_0.95_{1}_euclidean.txt"]
#file_name_patterns= [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_1.0_optimisation_{1}.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_0.9_optimisation_{1}.txt" ]

name_CPV = "{0}Dgauss__0_95__0_95_CPV_not_redefined_euclidean"
name_noCPV = "{0}Dgauss__1_0__1_0_noCPV_not_redefined_euclidean"
#name_CPV= "{0}Dgauss__1_0__0_95_CPV_chi2scoringopt"
#name_noCPV= "{0}Dgauss__1_0__1_0_noCPV_chi2scoringopt"

title_CPV = "Gauss 0.95 0.95 euclidean"
title_noCPV = "Gauss 1.0 1.0 euclidean"

directory_name = "_0_95__0_95_not_redefined_euclidean"

expt = experiment(nclf_list=nclf_list, file_name_patterns=file_name_patterns, scoring='chi2',
                  single_no_bins_list=[5], systematics_fraction=systematics_fraction, only_mod=False,
                  title_CPV=title_CPV, title_noCPV=title_noCPV, name_CPV=name_CPV,
                  name_noCPV=name_noCPV, directory_name=directory_name)

start_time = time.time()

expt.optimise(optimisation_dimension=4, keras_optimisation_dimension=1, number_of_iterations=50)
# optimisation gave nn param_opt

evaluation_start_time = time.time()
print(50*"-"+"\noptimisation took ", (evaluation_start_time - start_time)/60., " minutes\n" + 50*"-")

expt.evaluate(evaluation_dimensions=range(1, 11), keras_evaluation_dimensions=[1]*10, number_of_evaluations=100)

end_time = time.time()
print(50*"-"+"\nevaluation took ", (end_time - evaluation_start_time)/60., " minutes\n" + 50*"-")
license: mit

repo_name: msimacek/samba
path: third_party/dnspython/dns/zone.py
copies: 47
size: 32039
content:
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS Zones.""" from __future__ import generators import sys import dns.exception import dns.name import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rrset import dns.tokenizer import dns.ttl class BadZone(dns.exception.DNSException): """The zone is malformed.""" pass class NoSOA(BadZone): """The zone has no SOA RR at its origin.""" pass class NoNS(BadZone): """The zone has no NS RRset at its origin.""" pass class UnknownOrigin(BadZone): """The zone's origin is unknown.""" pass class Zone(object): """A DNS zone. A Zone is a mapping from names to nodes. The zone object may be treated like a Python dictionary, e.g. zone[name] will retrieve the node associated with that name. The I{name} may be a dns.name.Name object, or it may be a string. In the either case, if the name is relative it is treated as relative to the origin of the zone. @ivar rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @ivar origin: The origin of the zone. @type origin: dns.name.Name object @ivar nodes: A dictionary mapping the names of nodes in the zone to the nodes themselves. @type nodes: dict @ivar relativize: should names in the zone be relativized? @type relativize: bool @cvar node_factory: the factory used to create a new node @type node_factory: class or callable """ node_factory = dns.node.Node __slots__ = ['rdclass', 'origin', 'nodes', 'relativize'] def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True): """Initialize a zone object. @param origin: The origin of the zone. @type origin: dns.name.Name object @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int""" self.rdclass = rdclass self.origin = origin self.nodes = {} self.relativize = relativize def __eq__(self, other): """Two zones are equal if they have the same origin, class, and nodes. @rtype: bool """ if not isinstance(other, Zone): return False if self.rdclass != other.rdclass or \ self.origin != other.origin or \ self.nodes != other.nodes: return False return True def __ne__(self, other): """Are two zones not equal? 
@rtype: bool """ return not self.__eq__(other) def _validate_name(self, name): if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) elif not isinstance(name, dns.name.Name): raise KeyError("name parameter must be convertable to a DNS name") if name.is_absolute(): if not name.is_subdomain(self.origin): raise KeyError("name parameter must be a subdomain of the zone origin") if self.relativize: name = name.relativize(self.origin) return name def __getitem__(self, key): key = self._validate_name(key) return self.nodes[key] def __setitem__(self, key, value): key = self._validate_name(key) self.nodes[key] = value def __delitem__(self, key): key = self._validate_name(key) del self.nodes[key] def __iter__(self): return self.nodes.iterkeys() def iterkeys(self): return self.nodes.iterkeys() def keys(self): return self.nodes.keys() def itervalues(self): return self.nodes.itervalues() def values(self): return self.nodes.values() def iteritems(self): return self.nodes.iteritems() def items(self): return self.nodes.items() def get(self, key): key = self._validate_name(key) return self.nodes.get(key) def __contains__(self, other): return other in self.nodes def find_node(self, name, create=False): """Find a node in the zone, possibly creating it. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @raises KeyError: the name is not known and create was not specified. @rtype: dns.node.Node object """ name = self._validate_name(name) node = self.nodes.get(name) if node is None: if not create: raise KeyError node = self.node_factory() self.nodes[name] = node return node def get_node(self, name, create=False): """Get a node in the zone, possibly creating it. This method is like L{find_node}, except it returns None instead of raising an exception if the node does not exist and creation has not been requested. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @rtype: dns.node.Node object or None """ try: node = self.find_node(name, create) except KeyError: node = None return node def delete_node(self, name): """Delete the specified node if it exists. It is not an error if the node does not exist. """ name = self._validate_name(name) if self.nodes.has_key(name): del self.nodes[name] def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, create=False): """Look for rdata with the specified name and type in the zone, and return an rdataset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. The rdataset returned is not a copy; changes to it will change the zone. KeyError is raised if the name or type are not found. Use L{get_rdataset} if you want to have None returned instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @param create: should the node and rdataset be created if they do not exist? 
@type create: bool @raises KeyError: the node or rdata could not be found @rtype: dns.rrset.RRset object """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) node = self.find_node(name, create) return node.find_rdataset(self.rdclass, rdtype, covers, create) def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, create=False): """Look for rdata with the specified name and type in the zone, and return an rdataset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. The rdataset returned is not a copy; changes to it will change the zone. None is returned if the name or type are not found. Use L{find_rdataset} if you want to have KeyError raised instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @param create: should the node and rdataset be created if they do not exist? @type create: bool @rtype: dns.rrset.RRset object """ try: rdataset = self.find_rdataset(name, rdtype, covers, create) except KeyError: rdataset = None return rdataset def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE): """Delete the rdataset matching I{rdtype} and I{covers}, if it exists at the node specified by I{name}. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. It is not an error if the node does not exist, or if there is no matching rdataset at the node. If the node has no rdatasets after the deletion, it will itself be deleted. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) node = self.get_node(name) if not node is None: node.delete_rdataset(self.rdclass, rdtype, covers) if len(node) == 0: self.delete_node(name) def replace_rdataset(self, name, replacement): """Replace an rdataset at name. It is not an error if there is no rdataset matching I{replacement}. Ownership of the I{replacement} object is transferred to the zone; in other words, this method does not store a copy of I{replacement} at the node, it stores I{replacement} itself. If the I{name} node does not exist, it is created. @param name: the owner name @type name: DNS.name.Name object or string @param replacement: the replacement rdataset @type replacement: dns.rdataset.Rdataset """ if replacement.rdclass != self.rdclass: raise ValueError('replacement.rdclass != zone.rdclass') node = self.find_node(name, True) node.replace_rdataset(replacement) def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): """Look for rdata with the specified name and type in the zone, and return an RRset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. This method is less efficient than the similar L{find_rdataset} because it creates an RRset instead of returning the matching rdataset. 
It may be more convenient for some uses since it returns an object which binds the owner name to the rdata. This method may not be used to create new nodes or rdatasets; use L{find_rdataset} instead. KeyError is raised if the name or type are not found. Use L{get_rrset} if you want to have None returned instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @raises KeyError: the node or rdata could not be found @rtype: dns.rrset.RRset object """ name = self._validate_name(name) if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers) rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers) rrset.update(rdataset) return rrset def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): """Look for rdata with the specified name and type in the zone, and return an RRset encapsulating it. The I{name}, I{rdtype}, and I{covers} parameters may be strings, in which case they will be converted to their proper type. This method is less efficient than the similar L{get_rdataset} because it creates an RRset instead of returning the matching rdataset. It may be more convenient for some uses since it returns an object which binds the owner name to the rdata. This method may not be used to create new nodes or rdatasets; use L{find_rdataset} instead. None is returned if the name or type are not found. Use L{find_rrset} if you want to have KeyError raised instead. @param name: the owner name to look for @type name: DNS.name.Name object or string @param rdtype: the rdata type desired @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string @rtype: dns.rrset.RRset object """ try: rrset = self.find_rrset(name, rdtype, covers) except KeyError: rrset = None return rrset def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY, covers=dns.rdatatype.NONE): """Return a generator which yields (name, rdataset) tuples for all rdatasets in the zone which have the specified I{rdtype} and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, then all rdatasets will be matched. @param rdtype: int or string @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) for (name, node) in self.iteritems(): for rds in node: if rdtype == dns.rdatatype.ANY or \ (rds.rdtype == rdtype and rds.covers == covers): yield (name, rds) def iterate_rdatas(self, rdtype=dns.rdatatype.ANY, covers=dns.rdatatype.NONE): """Return a generator which yields (name, ttl, rdata) tuples for all rdatas in the zone which have the specified I{rdtype} and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, then all rdatas will be matched. 
@param rdtype: int or string @type rdtype: int or string @param covers: the covered type (defaults to None) @type covers: int or string """ if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(covers, (str, unicode)): covers = dns.rdatatype.from_text(covers) for (name, node) in self.iteritems(): for rds in node: if rdtype == dns.rdatatype.ANY or \ (rds.rdtype == rdtype and rds.covers == covers): for rdata in rds: yield (name, rds.ttl, rdata) def to_file(self, f, sorted=True, relativize=True, nl=None): """Write a zone to a file. @param f: file or string. If I{f} is a string, it is treated as the name of a file to open. @param sorted: if True, the file will be written with the names sorted in DNSSEC order from least to greatest. Otherwise the names will be written in whatever order they happen to have in the zone's dictionary. @param relativize: if True, domain names in the output will be relativized to the zone's origin (if possible). @type relativize: bool @param nl: The end of line string. If not specified, the output will use the platform's native end-of-line marker (i.e. LF on POSIX, CRLF on Windows, CR on Macintosh). @type nl: string or None """ if sys.hexversion >= 0x02030000: # allow Unicode filenames str_type = basestring else: str_type = str if nl is None: opts = 'w' else: opts = 'wb' if isinstance(f, str_type): f = file(f, opts) want_close = True else: want_close = False try: if sorted: names = self.keys() names.sort() else: names = self.iterkeys() for n in names: l = self[n].to_text(n, origin=self.origin, relativize=relativize) if nl is None: print >> f, l else: f.write(l) f.write(nl) finally: if want_close: f.close() def check_origin(self): """Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node """ if self.relativize: name = dns.name.empty else: name = self.origin if self.get_rdataset(name, dns.rdatatype.SOA) is None: raise NoSOA if self.get_rdataset(name, dns.rdatatype.NS) is None: raise NoNS class _MasterReader(object): """Read a DNS master file @ivar tok: The tokenizer @type tok: dns.tokenizer.Tokenizer object @ivar ttl: The default TTL @type ttl: int @ivar last_name: The last name read @type last_name: dns.name.Name object @ivar current_origin: The current origin @type current_origin: dns.name.Name object @ivar relativize: should names in the zone be relativized? @type relativize: bool @ivar zone: the zone @type zone: dns.zone.Zone object @ivar saved_state: saved reader state (used when processing $INCLUDE) @type saved_state: list of (tokenizer, current_origin, last_name, file) tuples. @ivar current_file: the file object of the $INCLUDed file being parsed (None if no $INCLUDE is active). @ivar allow_include: is $INCLUDE allowed? @type allow_include: bool @ivar check_origin: should sanity checks of the origin node be done? The default is True. 
@type check_origin: bool """ def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone, allow_include=False, check_origin=True): if isinstance(origin, (str, unicode)): origin = dns.name.from_text(origin) self.tok = tok self.current_origin = origin self.relativize = relativize self.ttl = 0 self.last_name = None self.zone = zone_factory(origin, rdclass, relativize=relativize) self.saved_state = [] self.current_file = None self.allow_include = allow_include self.check_origin = check_origin def _eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof(): break def _rr_line(self): """Process one line from a DNS master file.""" # Name if self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading = True) if not token.is_whitespace(): self.last_name = dns.name.from_text(token.value, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): # treat leading WS followed by EOL/EOF as if they were EOL/EOF. return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone.origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone.origin) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: ttl = self.ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except: rdclass = self.zone.rdclass if rdclass != self.zone.rdclass: raise dns.exception.SyntaxError("RR class is not zone's class") # Type try: rdtype = dns.rdatatype.from_text(token.value) except: raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value) n = self.zone.nodes.get(name) if n is None: n = self.zone.node_factory() self.zone.nodes[name] = n try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, False) except dns.exception.SyntaxError: # Catch and reraise. (ty, va) = sys.exc_info()[:2] raise va except: # All exceptions that occur in the processing of rdata # are treated as syntax errors. This is not strictly # correct, but it is correct almost all of the time. # We convert them to syntax errors so that we can emit # helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va))) rd.choose_relativity(self.zone.origin, self.relativize) covers = rd.covers() rds = n.find_rdataset(rdclass, rdtype, covers, True) rds.add(rd, ttl) def read(self): """Read a DNS master file and build a zone object. 
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin """ try: while 1: token = self.tok.get(True, True).unescape() if token.is_eof(): if not self.current_file is None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.ttl) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$': u = token.value.upper() if u == '$TTL': token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError("bad $TTL") self.ttl = dns.ttl.from_text(token.value) self.tok.get_eol() elif u == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone.origin is None: self.zone.origin = self.current_origin elif u == '$INCLUDE' and self.allow_include: token = self.tok.get() if not token.is_quoted_string(): raise dns.exception.SyntaxError("bad filename in $INCLUDE") filename = token.value token = self.tok.get() if token.is_identifier(): new_origin = dns.name.from_text(token.value, \ self.current_origin) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError("bad origin in $INCLUDE") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.ttl)) self.current_file = file(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin else: raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError, detail: (filename, line_number) = self.tok.where() if detail is None: detail = "syntax error" raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail)) # Now that we're done reading, do some basic checking of the zone. if self.check_origin: self.zone.check_origin() def from_text(text, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=False, check_origin=True): """Build a zone object from a master file format string. @param text: the master file format input @type text: string. @param origin: The origin of the zone; if not specified, the first $ORIGIN statement in the master file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @param relativize: should names be relativized? The default is True @type relativize: bool @param zone_factory: The zone factory to use @type zone_factory: function returning a Zone @param filename: The filename to emit when describing where an error occurred; the default is '<string>'. @type filename: string @param allow_include: is $INCLUDE allowed? @type allow_include: bool @param check_origin: should sanity checks of the origin node be done? The default is True. @type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ # 'text' can also be a file, but we don't publish that fact # since it's an implementation detail. The official file # interface is from_file(). 
if filename is None: filename = '<string>' tok = dns.tokenizer.Tokenizer(text, filename) reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory, allow_include=allow_include, check_origin=check_origin) reader.read() return reader.zone def from_file(f, origin = None, rdclass = dns.rdataclass.IN, relativize = True, zone_factory=Zone, filename=None, allow_include=True, check_origin=True): """Read a master file and build a zone object. @param f: file or string. If I{f} is a string, it is treated as the name of a file to open. @param origin: The origin of the zone; if not specified, the first $ORIGIN statement in the master file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @param relativize: should names be relativized? The default is True @type relativize: bool @param zone_factory: The zone factory to use @type zone_factory: function returning a Zone @param filename: The filename to emit when describing where an error occurred; the default is '<file>', or the value of I{f} if I{f} is a string. @type filename: string @param allow_include: is $INCLUDE allowed? @type allow_include: bool @param check_origin: should sanity checks of the origin node be done? The default is True. @type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ if sys.hexversion >= 0x02030000: # allow Unicode filenames; turn on universal newline support str_type = basestring opts = 'rU' else: str_type = str opts = 'r' if isinstance(f, str_type): if filename is None: filename = f f = file(f, opts) want_close = True else: if filename is None: filename = '<file>' want_close = False try: z = from_text(f, origin, rdclass, relativize, zone_factory, filename, allow_include, check_origin) finally: if want_close: f.close() return z def from_xfr(xfr, zone_factory=Zone, relativize=True): """Convert the output of a zone transfer generator into a zone object. @param xfr: The xfr generator @type xfr: generator of dns.message.Message objects @param relativize: should names be relativized? The default is True. It is essential that the relativize setting matches the one specified to dns.query.xfr(). @type relativize: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object """ z = None for r in xfr: if z is None: if relativize: origin = r.origin else: origin = r.answer[0].name rdclass = r.answer[0].rdclass z = zone_factory(origin, rdclass, relativize=relativize) for rrset in r.answer: znode = z.nodes.get(rrset.name) if not znode: znode = z.node_factory() z.nodes[rrset.name] = znode zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True) zrds.update_ttl(rrset.ttl) for rd in rrset: rd.choose_relativity(z.origin, relativize) zrds.add(rd) z.check_origin() return z
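# A minimal usage sketch, assuming this module is importable in the usual
# dns.zone location and that the toy master file below is enough to satisfy
# the SOA/NS checks performed by check_origin(). The zone data and names are
# invented purely for illustration.
if __name__ == '__main__':
    _example_master = (
        "$ORIGIN example.\n"
        "$TTL 3600\n"
        "@ IN SOA ns1.example. hostmaster.example. 1 7200 900 1209600 86400\n"
        "@ IN NS ns1.example.\n"
        "www IN A 192.0.2.1\n"
        "ftp IN A 192.0.2.2\n"
    )
    # Parse the master file text into a Zone, then walk its A records.
    _zone = from_text(_example_master, origin='example.')
    for _name, _ttl, _rdata in _zone.iterate_rdatas('A'):
        print("%s %d %s" % (_name, _ttl, _rdata))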
gpl-3.0
Laurawly/tvm-1
python/tvm/relay/frontend/pytorch.py
1
148541
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks # pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except # pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda # pylint: disable=missing-function-docstring """PT: PyTorch frontend.""" import functools import itertools import math import sys import logging import numpy as np import tvm from tvm.ir import IRModule from tvm.topi.utils import get_const_tuple from .. import analysis as _analysis from .. import expr as _expr from .. import function as _function from .. import op as _op from .. import qnn, transform from ..expr_functor import ExprMutator from ..loops import while_loop from ..prelude import Prelude, StaticTensorArrayOps from ..ty import Any, TensorType, TupleType from . import qnn_torch from .common import AttrCvt, get_relay_op, gru_cell, logger from .common import infer_shape as _infer_shape from .common import infer_value as _infer_value from .common import infer_value_simulated as _infer_value_simulated from .common import lstm_cell, try_infer_value, unbind from .common import set_span from .pytorch_utils import is_version_greater_than __all__ = ["from_pytorch"] # This returns a "subgraph" which puts variables whenever # the type is known. It also records things to map the input # nodes to the extracted graph's nodes. # As Python objects are not round-trippable through C++, and # our type annotations only live in Python, we need to map # the we need to map the nodes we get in visiting to the nodes # we used to construct the graph (they are the same in C++, # match each other in dictionary lookups, but are not the same # in Python) by using the hint dictionary filled as # {node: node for node in nodes} to get the type annotations. 
# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440 class _TypeFinder(ExprMutator): def __init__(self, types): super().__init__() self.counter = 0 self.vars = {} self.types = types self.leave = set() # some variables are not inputs def visit_let(self, let): self.leave.add(let.var) return super().visit_let(let) def visit_function(self, fn): self.leave.update(fn.params) return super().visit_function(fn) def visit(self, expr): if expr in self.leave: return super().visit(expr) if expr in self.vars: return self.vars[expr] if isinstance(expr, tvm.relay.Var): self.vars[expr] = expr return expr if expr in self.types: ty = self.types[expr] v = tvm.relay.var(f"_{self.counter}", type_annotation=ty) self.counter += 1 self.vars[expr] = v return v v = super().visit(expr) return v def _should_construct_dynamic_list(list_construct_node): # if this list is element-accessed or modified at runtime, generate List ADT def inplace_add_to_add(op_name): if op_name == "aten::add_": return "aten::add" else: return op_name uses = _get_uses(list_construct_node) for loop_use in filter(lambda use: use.user.kind() == "prim::Loop", uses): block_input_index = loop_use.offset - 1 block = list(loop_use.user.blocks())[0] list_loop_var = list(block.inputs())[block_input_index] uses += _get_uses(list_loop_var.node()) op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses)) list_ops = set(["aten::add", "aten::__getitem__"]) intersect = list_ops.intersection(op_names) if len(intersect) > 0 and intersect != set(["aten::add"]): return True # if add op outputs list, it is dynamic so we need to construct List ADT for use in filter(lambda use: use.user.kind() in ["aten::add", "aten::add_"], uses): output_type = _get_node_type(use.user) if output_type == "ListType": return True return False def _is_int_seq(seq): # TODO (t-vi): handle non-int constants? (like numpy.intXX) return len(seq) > 0 and all([isinstance(i, int) for i in seq]) # operator implementation class PyTorchOpConverter: """A helper class for holding PyTorch op converters.""" def __init__(self, prelude, default_dtype): self.prelude = prelude self.default_dtype = default_dtype self.create_convert_map() self.types = {} # map from nodes to (Relay) type annotations # this incrementally infers the type, see the comments on the type visitor # above. 
def infer_type(self, node, mod=None): """An incremental method to infer the type of a node in the relay graph.""" if node in self.types: return self.types[node] if isinstance(node, tvm.relay.Var): return node.type_annotation tf = _TypeFinder(types=self.types) new_node = tf.visit(node) fn = _function.Function(list(tf.vars.values()), new_node) new_mod = IRModule({"main": fn}) if mod is not None: new_mod.update(mod) new_mod = transform.RemoveUnusedFunctions()(new_mod) new_mod = transform.InferType()(new_mod) entry = new_mod["main"] ty = entry.body.checked_type self.types[node] = ty return self.types[node] def infer_type_with_prelude(self, val): body = self.infer_type(val, self.prelude.mod) return body # list ADT utilities def convert_to_list_adt(self, py_lst): elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst] msg = "List elements should have identical types" assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg # get_type returns type_name, ctor1, ..., ctorN # 1 is nil _, cons, nil = self.prelude.mod.get_type("List") adt_lst = nil() for elem in reversed(py_lst): adt_lst = cons(elem, adt_lst) return adt_lst def map_tensor_array_constructor(self, adt_lst, shape): static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", shape) static_tensor_array_ops.register() tensor_create = self.prelude.get_tensor_ctor_static("tensor_constructor", "float32", shape) return self.prelude.map(tensor_create, adt_lst) def convert_to_tensor_array(self, adt_lst): _, cons, nil = self.prelude.mod.get_type("List") if self.prelude.length(adt_lst) == 0: return nil() checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst)) shape = checked_type.shape tensor_array = self.map_tensor_array_constructor(adt_lst, shape) return tensor_array, tuple(shape) def infer_shape(self, inputs, mod=None): """A method to get the output type of an intermediate node in the graph.""" typ = self.infer_type(inputs, mod=mod) if hasattr(typ, "shape"): # Regular operator that outputs tensors return get_const_tuple(typ.shape) # The return type is not a tensor, for example List return typ def infer_shape_with_prelude(self, inputs): return self.infer_shape(inputs, mod=self.prelude.mod) def record_output_type(self, output): if isinstance(output, tuple): cleaned_output = [o for o in output if o is not None] types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output)) for o, t in zip(cleaned_output, types.fields): self.types[o] = t elif isinstance(output, _expr.Expr): self.infer_type_with_prelude(output) # it can also happen that the type is int or so def pytorch_promote_types(self, inputs, dtypes): """This promotes TVM inputs with TVM dtypes passed like PyTorch would""" actual_dtypes = [] for i, inp in enumerate(inputs): if isinstance(inp, _expr.Expr): idt = self.infer_type(inp).dtype actual_dtypes.append(idt) else: actual_dtypes.append(dtypes[i]) dtypes = actual_dtypes tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)] non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)] result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs) results = [] for inp, dt in zip(inputs, dtypes): if np.isscalar(inp): results.append(_expr.const(inp, dtype=result_type)) elif dt == result_type: results.append(inp) else: results.append(_op.cast(inp, result_type)) return results def is_quantized_tensor(self, data): # If a quantized Torch module is saved and loaded back, dtype will be dropped # Since dtypes from Torch tensors are not reliable in such cases, we use # Relay's 
type inference result to decide if an input tensor is quantized ty = self.infer_type_with_prelude(data) return ty.dtype == "uint8" # Operator implementations def make_elemwise(self, name): def elemwise(inputs, input_types): data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2]) return get_relay_op(name)(data0, data1) return elemwise def min_max_common(self, name_elemwise, name_reduce, inputs, input_types): if len(inputs) == 1: data = self.pytorch_promote_types(inputs[:1], input_types[:1]) return get_relay_op(name_reduce)(data[0]) elif len(inputs) >= 2 and isinstance(inputs[1], int): data = self.pytorch_promote_types(inputs[:1], input_types[:1]) dim = inputs[1] keepdims = inputs[2] if len(inputs) > 2 else False # also return dummy indices return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None else: data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2]) return get_relay_op(name_elemwise)(data0, data1) def max(self, inputs, input_types): return self.min_max_common("maximum", "max", inputs, input_types) def min(self, inputs, input_types): return self.min_max_common("minimum", "min", inputs, input_types) def make_unary(self, name): def unary(inputs, input_types): # this is just to ensure tensor input (data,) = self.pytorch_promote_types(inputs[:1], input_types[:1]) return get_relay_op(name)(data) return unary def log1p(self, inputs, input_types): # 1_plus_log x = log(x + 1) (dtype,) = input_types one = _expr.const(1, dtype=dtype) return _op.log(inputs[0] + one) def arange(self, inputs, input_types): def _get_value(val, dtype): # dtype is a tvm dtype if isinstance(val, _expr.Expr): inp = _op.cast(val, dtype) ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype)) else: ret = _create_typed_const(val, dtype) return ret def _get_type(val, inp_type): if isinstance(val, _expr.Expr): dtype = str(self.infer_type(val)) return dtype return inp_type # PyTorch arange uses the following type semantics: # - if a dtype is given, start, stop, step are converted to that dtype # - if no dtype is given and all args are integral, dtype is int64 # - if no dtype is given and there is a float arg, dtype is float32 if len(inputs) == 5: dtype0 = _get_type(inputs[0], input_types[0]) if inputs[1] is not None: dtype = _convert_dtype_value(inputs[1]) elif dtype0.startswith("float"): dtype = "float32" else: dtype = "int64" start = _expr.const(0, dtype) stop = _get_value(inputs[0], dtype) step = _expr.const(1, dtype) elif len(inputs) == 7: types = [_get_type(inputs[i], input_types[i]) for i in range(3)] if inputs[3] is not None: dtype = _convert_dtype_value(inputs[3]) elif any([t.startswith("float") for t in types]): dtype = "float32" else: dtype = "int64" start = _get_value(inputs[0], dtype) stop = _get_value(inputs[1], dtype) step = _get_value(inputs[2], dtype) else: msg = "Unknown number of arguments (%d) to parse." % (len(inputs)) raise AssertionError(msg) return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype) def squeeze(self, inputs, input_types): data = inputs[0] if len(inputs) == 1: axis = None else: # TODO (t-vi): why is the cast to int needed? 
similarly elsewhere axis = [int(inputs[1])] return _op.transform.squeeze(data, axis) def unsqueeze(self, inputs, input_types): data = inputs[0] axis = inputs[1] return _op.transform.expand_dims(data, int(axis), 1) def concatenate(self, inputs, input_types): def tensor_array_concat(lst, axis): assert axis == 0, "Tensor array concat supported only for axis 0" tensor_array, shape = self.convert_to_tensor_array(lst) concat_shape = (Any(),) + shape[1:] concat = self.prelude.get_global_var_static("tensor_array_concat", "float32", shape) concatenated = concat(tensor_array) static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", concat_shape) static_tensor_array_ops.register() get_tensor = self.prelude.get_global_var_static( "tensor_get_data", "float32", concat_shape ) return get_tensor(concatenated) data = inputs[0] axis = inputs[1] if not isinstance(data, list): return tensor_array_concat(data, axis) if isinstance(data, _expr.Expr): data = [data] return _op.tensor.concatenate(data, int(axis)) def slice(self, inputs, input_types): axis_dtype = "int64" index_size_limit = sys.maxsize data = inputs[0] dshape = self.infer_shape(data) ndim = len(dshape) dim = int(inputs[1]) stride = inputs[4] target_begin, is_begin_const = try_infer_value( inputs[2], lambda ret: ret.astype(np.int).item(0) ) target_end, is_end_const = try_infer_value( inputs[3], lambda ret: ret.astype(np.int).item(0) ) # A fast path when slicing is nop. if ( isinstance(target_begin, int) and isinstance(target_end, int) and target_begin == 0 and target_end >= index_size_limit and stride == 1 ): return data if target_begin is None and target_end is None: return data # Process begin begin = [0] * ndim if target_begin is not None: begin[dim] = target_begin if target_begin is not None and not isinstance(begin[dim], int): tmp = [] for b in begin: if isinstance(b, int): tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0)) else: tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype)) begin = _op.concatenate(tmp, axis=0) btype = self.infer_type(begin).dtype if str(btype) != axis_dtype: begin = _op.cast(begin, axis_dtype) # Process end if isinstance(target_end, int) and target_end >= index_size_limit: target_end = dshape[dim] if any([isinstance(d, tvm.tir.Any) for d in dshape]): end = _op.shape_of(data) else: end = dshape if isinstance(target_end, int): if isinstance(end, list): end[dim] = target_end else: all_static = True for i, shape_dim in enumerate(dshape): if i != dim and isinstance(shape_dim, tvm.tir.Any): all_static = False if all_static: end = list(get_const_tuple(dshape)) end[dim] = target_end else: target_end = _expr.const(target_end) end = _op.scatter( end, _op.expand_dims(_expr.const(dim), axis=0), _op.expand_dims(target_end, axis=0), axis=0, ) else: end = _op.cast(_op.shape_of(data), axis_dtype) if target_end is not None and not isinstance(target_end, tvm.tir.Any): ttype = self.infer_type(target_end).dtype if str(ttype) != axis_dtype: target_end = _op.cast(target_end, axis_dtype) end = _op.scatter( end, _op.expand_dims(_expr.const(dim), axis=0), _op.expand_dims(target_end, axis=0), axis=0, ) if not isinstance(end, list): etype = self.infer_type(end).dtype if str(etype) != axis_dtype: end = _op.cast(end, axis_dtype) strides = [1] * ndim strides[dim] = stride return _op.transform.strided_slice( data, begin=begin, end=end, strides=strides, slice_mode="end" ) def narrow(self, inputs, input_types): # Inputs are: # 0 - the tensor to narrow # 1 - the dimension along which to narrow # 2 - the starting 
dimension # 3 - the distance to the ending dimension # Lets find the ending dimension end = self.add(inputs[2:4], input_types[2:4]) stride = 1 slice_input = inputs[:3] + [end, stride] slice_types = input_types + ["int32"] return self.slice(slice_input, slice_types) def split(self, inputs, input_types): data = inputs[0] split_size = int(inputs[1]) dim = int(inputs[2]) split_index = split_size indices = [] while split_index < self.infer_shape(data)[dim]: indices.append(split_index) split_index += split_size return _op.split(data, indices, dim) def split_with_sizes(self, inputs, input_types): data = inputs[0] sections = inputs[1] dim = int(inputs[2]) if len(sections) == 1: # a special case used in torchvision detection models return _expr.TupleWrapper(_expr.Tuple([data]), 1) split_index = 0 indices = [] for i in range(len(sections) - 1): index, _ = try_infer_value(sections[i], lambda ret: int(ret)) split_index += index indices.append(split_index) return _op.split(data, indices, dim) def select(self, inputs, input_types): data = inputs[0] dim = int(inputs[1]) index = _wrap_const(inputs[2]) return _op.transform.take(data, index, axis=dim, mode="wrap") def take(self, inputs, input_types): data = inputs[0] indices = _op.cast(inputs[1], "int32") return _op.transform.take(data, indices=indices, mode="wrap") def topk(self, inputs, input_types): data = inputs[0] axis = int(inputs[2]) is_ascend = not bool(inputs[3]) sort = bool(inputs[4]) if isinstance(inputs[1], _expr.Expr): k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist()) else: k = inputs[1] if not sort: msg = "Currently supports only sorted output for topk operator." raise AssertionError(msg) outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type="both", dtype="int64") return outs[0], outs[1] def reciprocal(self, inputs, input_types): data = inputs[0] return _expr.const(1.0, dtype=input_types[0]) / data def repeat(self, inputs, input_types): data = inputs[0] reps = [] for r in inputs[1]: if isinstance(r, int): reps.append(r) else: reps.append(int(_infer_value(r, {}).numpy())) return _op.transform.tile(data, reps=reps) def repeat_interleave(self, inputs, input_types): data = inputs[0] if isinstance(inputs[1], int): repeats = inputs[1] axis = inputs[2] elif isinstance(inputs[1], _expr.Expr): if isinstance(inputs[1], _expr.Constant): repeats = int(inputs[1].data.numpy()) else: repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist()) axis = inputs[2] else: msg = "Only repeat with one value as repeat is currently supported." 
raise AssertionError(msg) if axis is None: # Flatten the data if no axis is given from torch data = _op.transform.reshape(data, [-1]) axis = 0 return _op.transform.repeat(data, repeats=repeats, axis=axis) def addcdiv(self, inputs, input_types): data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4]) return data + (c * (t1 / t2)) def addcmul(self, inputs, input_types): data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4]) return data + (c * (t1 * t2)) def where(self, inputs, input_types): if len(inputs) == 1: return self.nonzero([inputs[0], True], input_types) cond = inputs[0] x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3]) return _op.where(cond, x, y) def full_impl(self, data, fill_value, dtype): size = [] need_reshape = False new_shape = [] for dim in data: if isinstance(dim, _expr.Expr): if isinstance(dim, _expr.Constant): dim = int(dim.data.numpy()) if isinstance(size, list): size.append(dim) new_shape.append(dim) else: dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0) new_shape.append(dim) if success: if isinstance(size, list): size.append(dim) else: size = None need_reshape = True else: if isinstance(size, list): size.append(dim) new_shape.append(dim) if size is None: tmp = [] for dim in data: tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64")) size = _op.concatenate(tmp, axis=0) out = _op.full(_expr.const(fill_value), size, dtype=dtype) if need_reshape: out = _op.reshape(out, new_shape) return out def ones(self, inputs, input_types): data = inputs[0] import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): msg = "Data type %s could not be parsed in ones op" % (type(data)) raise AssertionError(msg) if inputs[1] is not None: dtype = _convert_dtype_value(inputs[1]) else: dtype = self.default_dtype return self.full_impl(data, 1, dtype) def ones_like(self, inputs, input_types): data = inputs[0] out = _op.ones_like(data) # If the input and the output datatype is different, do a cast if inputs[1] is not None: dtype = _convert_dtype_value(inputs[1]) else: dtype = self.default_dtype if input_types[0] != dtype: out = _op.cast(out, dtype) return out def zeros(self, inputs, input_types): data = inputs[0] import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): msg = "Data type %s could not be parsed in zeros op" % (type(data)) raise AssertionError(msg) if inputs[1] is not None: dtype = _convert_dtype_value(inputs[1]) else: dtype = self.default_dtype return self.full_impl(data, 0, dtype) def zeros_like(self, inputs, input_types): data = inputs[0] out = _op.zeros_like(data) # If the input and the output datatype is different, do a cast if inputs[1] is not None: dtype = _convert_dtype_value(inputs[1]) else: dtype = self.default_dtype if input_types[0] not in dtype: out = _op.cast(out, dtype) return out def full(self, inputs, input_types): data = inputs[0] fill_value = inputs[1] import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): msg = "Data type %s could not be parsed in full op" % (type(data)) raise AssertionError(msg) if inputs[2] is not None: # dtype given dtype = _convert_dtype_value(inputs[2]) else: # if dtype is None, torch uses a global default set by torch.set_default_tensor_type() dtype = self.default_dtype return self.full_impl(data, fill_value, dtype) def full_like(self, inputs, input_types): data = inputs[0] fill_value = inputs[1] out = _op.full_like(data, _expr.const(fill_value)) # If the input and the output datatype is 
different, do a cast if inputs[2] is not None: # dtype given dtype = _convert_dtype_value(inputs[2]) else: # if dtype is None, torch uses a global default set by torch.set_default_tensor_type() dtype = self.default_dtype if input_types[0] not in dtype: out = _op.cast(out, dtype) return out def linspace(self, inputs, input_types): start = inputs[0] stop = inputs[1] step = inputs[2] # Find the spacing between values as step if step != 1: step = (stop - start) / (step - 1) stop = stop + step else: stop = start + step if inputs[3] is None: import torch dtype = _convert_data_type(str(torch.get_default_dtype())) else: dtype = _convert_dtype_value(inputs[3]) start = _create_typed_const(start, dtype) stop = _create_typed_const(stop, dtype) step = _create_typed_const(step, dtype) return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype) def relu(self, inputs, input_types): data = inputs[0] if self.is_quantized_tensor(data): assert len(inputs) == 3, "Input quant param not found in op inputs" input_zero_point = _expr.const(inputs[2], dtype="int32") return qnn_torch.quantized_relu(data, input_zero_point) return _op.nn.relu(data) def prelu(self, inputs, input_types): # Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU data = inputs[0] dim = self.get_dims(data) ndims = len(dim) axis = 0 if ndims == 1 else 1 alpha = _op.broadcast_to(inputs[1], (dim[axis])) return _op.nn.prelu(data, alpha, axis) def leaky_relu(self, inputs, input_types): data = inputs[0] alpha = float(inputs[1]) return _op.nn.leaky_relu(data, alpha) def elu(self, inputs, input_types): data = inputs[0] dtype = input_types[0] alpha = _expr.const(-float(inputs[1]), dtype=dtype) return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data) def celu(self, inputs, input_types): data = inputs[0] dtype = input_types[0] alpha = _expr.const(float(inputs[1]), dtype=dtype) return alpha * _op.nn.relu( _expr.const(1, dtype=dtype) - _op.exp(data / alpha) ) + _op.nn.relu(data) def gelu(self, inputs, input_types): data = inputs[0] dtype = input_types[0] # gelu is data * normcdf(data) # normcdf expressed as erf because we don't currently have that intrinsic # note that there is also a fastgelu variant approximating normcdf # with tanh and third order polynomials, but this is "true" gelu return data * ( _expr.const(0.5, dtype=dtype) + _op.erf(data * _expr.const(0.5 ** 0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype) ) def selu(self, inputs, input_types): data = inputs[0] # https://pytorch.org/docs/stable/nn.html#selu dtype = input_types[0] alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype) gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype) return gamma * ( alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data) ) def silu(self, inputs, input_types): data = inputs[0] return data * _op.tensor.sigmoid(data) def log_sigmoid(self, inputs, input_types): data = inputs[0] return _op.log(_op.tensor.sigmoid(data)) def hard_sigmoid(self, inputs, input_types): def _relu6(x): return _op.tensor.clip(x, 0.0, 6.0) def func(x): return _relu6(x + _expr.const(3.0)) / _expr.const(6.0) if self.is_quantized_tensor(inputs[0]): input_scale = _expr.const(inputs[1]) input_zero_point = _expr.const(inputs[2]) # PyTorch seems to use the following output qparams, but accuracy # is broken if we use this. 
# TODO(masahi): Revisit this parameter choice # # Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp # output_scale = _expr.const(0.00390625) # 1.0 / 2^8 # output_zero_point = _expr.const(-128) output_scale = input_scale output_zero_point = input_zero_point data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1) out = func(data) return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype="uint8") return func(inputs[0]) def hard_swish(self, inputs, input_types): data = inputs[0] return data * self.hard_sigmoid(inputs, input_types) def adaptive_avg_pool(self, op, inputs, input_types): data = inputs[0] output_size = inputs[1] def func(x): return op(x, output_size=output_size) if self.is_quantized_tensor(data): return qnn_torch.apply_with_upcast(data, func) return func(data) def adaptive_max_pool(self, op, inputs, input_types): data = inputs[0] output_size = inputs[1] # returns dummy indices too return op(data, output_size=output_size), None @staticmethod def convert_const_list(data): if isinstance(data, list): for i, _ in enumerate(data): if isinstance(data[i], _expr.Expr): data[i] = int(_infer_value_simulated(data[i], {}).numpy()) return data def maxpool_2d(self, inputs, input_types): data = inputs[0] pool_size = self.convert_const_list(inputs[1]) strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size) padding = inputs[3] dilation = inputs[4] ceil_mode = int(inputs[5]) return _op.nn.max_pool2d( data, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding, layout="NCHW", ceil_mode=ceil_mode, ) def maxpool_2d_with_indices(self, inputs, input_types): # returns dummy indices too return self.maxpool_2d(inputs, input_types), None def maxpool_1d(self, inputs, input_types): data = inputs[0] pool_size = inputs[1] strides = inputs[2] if inputs[2] else pool_size padding = inputs[3] dilation = inputs[4] ceil_mode = int(inputs[5]) return _op.nn.max_pool1d( data, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding, layout="NCW", ceil_mode=ceil_mode, ) def maxpool_3d(self, inputs, input_types): data = inputs[0] pool_size = inputs[1] strides = inputs[2] if inputs[2] else pool_size padding = inputs[3] dilation = inputs[4] ceil_mode = int(inputs[5]) return _op.nn.max_pool3d( data, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding, ceil_mode=ceil_mode, ) def hardtanh(self, inputs, input_types): a = inputs[0] tanh_min = float(inputs[1]) tanh_max = float(inputs[2]) return _op.tensor.clip(a, tanh_min, tanh_max) def convolution(self, inputs, input_types): # Use transpose or normal use_transpose = True if inputs[6] == 1 else False data = inputs[0] weight = inputs[1] bias = inputs[2] strides = tuple(inputs[3]) padding = tuple(inputs[4]) dilation = tuple(inputs[5]) if isinstance(weight, _expr.Expr): inferred_shape = self.infer_shape(weight) weight_shape = [] for infer in inferred_shape: weight_shape.append(infer) else: msg = "Data type %s could not be parsed in conv op" % (type(weight)) raise AssertionError(msg) # Transposed convolutions have IOHW layout. 
if use_transpose: weight_shape[0], weight_shape[1] = weight_shape[1], weight_shape[0] channels = weight_shape[0] groups = int(inputs[8]) # Check if this is depth wise convolution # We need to reshape weight so that Relay could recognize this is depth wise # weight_shape[1] is always in_channels // groups # For depthwise, in_channels == groups, so weight_shape[1] == 1 # If groups > 1 but weight_shape[1] != 1, this is group convolution if groups > 1 and weight_shape[1] == 1: channel_multiplier = channels // groups new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:]) weight = _op.transform.reshape(weight, new_weight_shape) kernel_size = weight_shape[2:] use_bias = isinstance(bias, _expr.Expr) # We are trying to invoke various relay operations through a single conv_op variable. # However the function signatures for some operations have additional attributes so we # pass these in along with the standard ones. additional_arguments = dict() if use_transpose: if len(kernel_size) == 3: conv_op = _op.nn.conv3d_transpose elif len(kernel_size) == 2: conv_op = _op.nn.conv2d_transpose else: conv_op = _op.nn.conv1d_transpose output_padding = tuple(inputs[7]) additional_arguments["output_padding"] = output_padding else: if len(kernel_size) == 3: conv_op = _op.nn.conv3d elif len(kernel_size) == 2: conv_op = _op.nn.conv2d else: conv_op = _op.nn.conv1d if len(kernel_size) == 3: data_layout = "NCDHW" kernel_layout = "OIDHW" elif len(kernel_size) == 2: data_layout = "NCHW" kernel_layout = "OIHW" if use_transpose: # Transposed convolutions have IOHW layout. kernel_layout = "IOHW" else: data_layout = "NCW" kernel_layout = "OIW" # Conv1d does not currently support grouped convolution so we convert it to conv2d is_grouped_conv1d = False if groups > 1 and len(kernel_size) == 1 and not use_transpose: is_grouped_conv1d = True conv_op = _op.nn.conv2d kernel_size = [1] + kernel_size strides = (1,) + strides padding = (0,) + padding dilation = (1,) + dilation data = _op.expand_dims(data, axis=2) weight = _op.expand_dims(weight, axis=2) data_layout = "NCHW" kernel_layout = "OIHW" conv_out = conv_op( data, weight, strides=strides, padding=padding, dilation=dilation, groups=groups, channels=channels, kernel_size=kernel_size, data_layout=data_layout, kernel_layout=kernel_layout, out_layout="", out_dtype="", **additional_arguments, ) if use_bias: res = _op.nn.bias_add(conv_out, bias) else: res = conv_out if is_grouped_conv1d: # Because we conducted grouped conv1d convolution through conv2d we must # squeeze the output to get the correct result. 
res = _op.squeeze(res, axis=[2]) return res def softmax(self, inputs, input_types): data = inputs[0] axis = inputs[1] if isinstance(axis, str): axis = int(axis) return _op.nn.softmax(data, axis=axis) def threshold(self, inputs, input_types): data = inputs[0] return _op.nn.relu(data) def contiguous(self, inputs, input_types): return inputs[0] def batch_norm(self, inputs, input_types): data = inputs[0] data_type = input_types[0] channels = self.infer_shape(data) if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr): scale = center = True weight = inputs[1] beta = inputs[2] gamma = weight else: scale = center = False if not scale: gamma = _create_typed_const(np.ones([int(channels[1])]), data_type) if not center: beta = _create_typed_const(np.zeros([int(channels[1])]), data_type) moving_mean = inputs[3] moving_var = inputs[4] epsilon = float(inputs[7]) return _op.nn.batch_norm( data, gamma, beta, moving_mean, moving_var, axis=1, epsilon=epsilon, center=center, scale=scale, )[0] def instance_norm(self, inputs, input_types): data = inputs[0] data_type = input_types[0] channels = self.infer_shape(data) if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr): scale = center = True weight = inputs[1] beta = inputs[2] gamma = weight else: scale = center = False if not scale: gamma = _create_typed_const(np.ones([int(channels[1])]), data_type) if not center: beta = _create_typed_const(np.zeros([int(channels[1])]), data_type) epsilon = float(inputs[7]) return _op.nn.instance_norm( data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale ) def get_dims(self, data): import torch if isinstance(data, _expr.Expr): dims = self.infer_shape(data) elif isinstance(data, list): dims = data elif isinstance(data, (torch.Tensor, np.ndarray)): dims = data.shape else: msg = "Data type %s could not be parsed" % type(data) raise AssertionError(msg) return dims def layer_norm(self, inputs, input_types): data = inputs[0] ndims = len(self.get_dims(inputs[1])) assert ndims == 1, "Support only normalization over last one dimension." 
return _op.nn.layer_norm( data, gamma=inputs[2], beta=inputs[3], axis=-1, epsilon=float(inputs[4]), center=True, scale=True, ) def group_norm(self, inputs, input_types): data = inputs[0] gamma = inputs[2] beta = inputs[3] num_groups = inputs[1] epsilon = float(inputs[4]) return _op.nn.group_norm( data, gamma=gamma, beta=beta, num_groups=num_groups, axis=1, epsilon=epsilon, center=True, scale=True, ) def transpose(self, inputs, input_types): data = inputs[0] import torch if isinstance(data, _expr.Expr): ndims = len(self.infer_shape_with_prelude(data)) elif isinstance(data, list): ndims = data elif isinstance(data, (torch.Tensor, np.ndarray)): ndims = data.shape else: msg = "Data type %s could not be parsed in transpose op" % (type(data)) raise AssertionError(msg) if isinstance(data, tvm.runtime.NDArray): ndims = len(data.shape) axes = list(range(ndims)) num_inputs = len(inputs) if num_inputs == 1: if ndims >= 2: axes[-1] = ndims - 2 axes[-2] = ndims - 1 if not isinstance(data, _expr.Expr): data = _expr.const(data) elif num_inputs == 3: parse = lambda i: ndims * (i < 0) + i src, dst = [parse(int(inputs[i])) for i in [1, 2]] axes[src] = dst axes[dst] = src else: axes = inputs[1] return _op.transform.transpose(data, axes) def flatten(self, inputs, input_types): data = inputs[0] start = int(inputs[1]) end = int(inputs[2]) dshape = get_const_tuple(self.infer_shape_with_prelude(data)) ndim = len(dshape) if end < 0: end += ndim new_shape = [0] * start new_shape.append(-1) squeeze_axes = [] for i in range(start + 1, end + 1): new_shape.append(1) squeeze_axes.append(i) for _ in range(end + 1, ndim): new_shape.append(0) out = _op.reshape(data, new_shape) if squeeze_axes: out = _op.squeeze(out, axis=squeeze_axes) return out def addmm(self, inputs, input_types): input_mat = inputs[0] mat1 = inputs[1] data_type = input_types[1] mat2 = inputs[2] beta = inputs[3] alpha = inputs[4] if not isinstance(alpha, _expr.Expr) and alpha != 1: alpha = _create_typed_const(alpha, data_type) mat1 *= alpha if not isinstance(beta, _expr.Expr) and beta != 1: beta = _create_typed_const(beta, data_type) mat2 *= beta transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0]) units = self.infer_shape(transposed_mat2)[0] dense_out = _op.nn.dense(mat1, transposed_mat2, units=units) return dense_out + input_mat def size(self, inputs, input_types): shape = self.infer_shape_with_prelude(inputs[0]) axis = None if len(inputs) > 1: axis = int(inputs[1]) if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)): if axis is None or isinstance(shape[axis], tvm.tir.expr.Any): shape_dynamic = _op.shape_of(inputs[0], dtype="int32") if axis is not None: return _op.take(shape_dynamic, _expr.const(axis), 0) return shape_dynamic if axis is not None: return _expr.const(shape[axis]) return _expr.const(shape) def numtotensor(self, inputs, input_types): val = inputs[0] dtype = input_types[0] if isinstance(val, _expr.Expr): return val if isinstance(val, tvm.tir.IntImm): val = val.__int__() dtype = int arr = val * np.ones([]).astype(dtype) return arr def tensortonum(self, inputs, input_types): return inputs[0] def view(self, inputs, input_types): data = inputs[0] if len(inputs) == 3: shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]] else: if isinstance(inputs[1], list): shape_inp = inputs[1] else: shape_inp = self.infer_shape(inputs[1]) new_shape = shape_inp for i, shape in enumerate(shape_inp): if isinstance(shape, _expr.Expr): val = _infer_value_simulated(shape, {}) new_shape[i] = val.numpy().item(0) return _op.transform.reshape(data, 
new_shape) def reshape(self, inputs, input_types): data = inputs[0] new_shape = inputs[1] tmp_shape = [] is_dyn = False for s in new_shape: if isinstance(s, _expr.Constant): tmp_shape.append(int(s.data.numpy())) elif isinstance(s, _expr.Expr): dim, success = try_infer_value(s, lambda ret: int(ret)) tmp_shape.append(dim) if not success: is_dyn = True else: tmp_shape.append(s) if is_dyn: new_shape = [] for i, s in enumerate(tmp_shape): if not isinstance(s, _expr.Expr): s = _expr.const(s, "int64") else: s = _op.cast(s, "int64") new_shape.append(_op.expand_dims(s, axis=0)) new_shape = _op.concatenate(new_shape, axis=0) else: new_shape = tmp_shape return _op.transform.reshape(data, new_shape) def pixel_shuffle(self, inputs, input_types): data = inputs[0] upscale_factor = inputs[1] upscale_squared = upscale_factor * upscale_factor b, c, h, w = self.infer_shape(data) assert ( c % upscale_squared == 0 ), "input channel should be divisible by square of upscale_factor" ndims = len(self.infer_shape_with_prelude(data)) axes = list(range(ndims)) num_inputs = len(inputs) oc = c // upscale_squared oh = h * upscale_factor ow = w * upscale_factor new_shape = [b, oc, upscale_factor, upscale_factor, h, w] out_shape = [b, oc, oh, ow] data = _op.transform.reshape(data, new_shape) # The data will be transposed to # [b, oc, h, upscale_factor, w, upscale_factor] # for further reshape axes = [0, 1, 4, 2, 5, 3] data = _op.transform.transpose(data, axes) return _op.transform.reshape(data, out_shape) def clone(self, inputs, input_types): data = inputs[0] return _op.tensor.copy(data) def log_softmax(self, inputs, input_types): data = inputs[0] axis = int(inputs[1]) return _op.nn.log_softmax(data, axis) def sigmoid(self, inputs, input_types): data = inputs[0] return _op.tensor.sigmoid(data) def softplus(self, inputs, input_types): data = inputs[0] dtype = input_types[0] beta = _expr.const(float(inputs[1]), dtype=dtype) return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta def make_avg_pool(self, dim): def avg_pool(inputs, input_types): data = inputs[0] pool_size = self.convert_const_list(inputs[1]) strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size) padding = inputs[3] ceil_mode = int(inputs[4]) count_include_pad = int(inputs[5]) def func(x): if dim == 1: return _op.nn.avg_pool1d( x, pool_size=pool_size, strides=strides, padding=padding, dilation=(1,), ceil_mode=ceil_mode, count_include_pad=count_include_pad, ) elif dim == 2: return _op.nn.avg_pool2d( x, pool_size=pool_size, strides=strides, padding=padding, dilation=(1, 1), ceil_mode=ceil_mode, count_include_pad=count_include_pad, ) elif dim == 3: return _op.nn.avg_pool3d( x, pool_size=pool_size, strides=strides, padding=padding, dilation=(1, 1, 1), ceil_mode=ceil_mode, count_include_pad=count_include_pad, ) else: msg = "Average Pooling dimension should be between 1 and 3" raise RuntimeError(msg) if self.is_quantized_tensor(data): return qnn_torch.apply_with_upcast(data, func) return func(data) return avg_pool def linear(self, inputs, input_types): # https://pytorch.org/docs/stable/nn.functional.html#linear # 0 - input # 1 - weight bias = inputs[2] a_shape = self.infer_shape_with_prelude(inputs[0]) b_shape = self.infer_shape_with_prelude(inputs[1]) if len(a_shape) == 2 and len(b_shape) == 2: mm_out = _op.nn.dense(inputs[0], inputs[1]) elif len(b_shape) == 1: mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2]) else: mm_out = self.matmul( [inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2] ) if 
isinstance(bias, _expr.Expr): bias_ndims = len(self.infer_shape_with_prelude(bias)) if bias_ndims == 1: return _op.nn.bias_add(mm_out, bias, axis=-1) mm_dtype = self.infer_type_with_prelude(mm_out).dtype return self.add([mm_out, bias], [mm_dtype, input_types[2]]) return mm_out def dropout(self, inputs, input_types): data = inputs[0] rate = float(inputs[1]) return _op.nn.dropout(data, rate) def make_reduce(self, name): def reduce(inputs, input_types): data = inputs[0] axis = None keepdims = False if len(inputs) > 2: # default, torch have only data, axis=None, keepdims=False if isinstance(inputs[1], int): axis = int(inputs[1]) elif _is_int_seq(inputs[1]): axis = inputs[1] else: axis = list(self.infer_shape(inputs[1])) keepdims = bool(inputs[2]) return get_relay_op(name)(data, axis=axis, keepdims=keepdims) return reduce def norm(self, inputs, input_types): data = inputs[0] dtype = input_types[0] axis = None keepdims = False if len(inputs) > 3: axis = inputs[2] keepdims = bool(inputs[3]) order = inputs[1] if order == np.inf: return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims) elif order == np.NINF: return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims) else: reci_order = _expr.const(1.0 / order, dtype=dtype) order = _expr.const(order) return _op.power( _op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims), reci_order, ) def frobenius_norm(self, inputs, input_types): data = inputs[0] axis = None keepdims = False if len(inputs) > 2: axis = inputs[1] if len(inputs[1]) > 0 else None keepdims = bool(inputs[2]) return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims)) def std(self, inputs, input_types): data = inputs[0] if len(inputs) == 2: axis = None keepdims = False unbiased = bool(inputs[1]) else: axis = inputs[1] keepdims = bool(inputs[3]) unbiased = bool(inputs[2]) return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased) def variance(self, inputs, input_types): data = inputs[0] if len(inputs) == 2: axis = None keepdims = False unbiased = bool(inputs[1]) else: axis = inputs[1] keepdims = bool(inputs[3]) unbiased = bool(inputs[2]) return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased) def mean(self, inputs, input_types): data = inputs[0] if inputs[1]: axis = inputs[1] else: axis = None if len(inputs) > 2 and inputs[2]: keepdims = int(inputs[2]) else: keepdims = False if len(inputs) > 3 and inputs[3]: exclude = int(inputs[3]) else: exclude = False def func(x): return _op.mean(x, axis, keepdims, exclude) if self.is_quantized_tensor(data): assert len(inputs) == 6, "Input quant param not found in op inputs" input_scale = _expr.const(inputs[4]) input_zero_point = _expr.const(inputs[5]) return qnn_torch.quantized_mean(data, input_scale, input_zero_point, func) return func(data) def chunk(self, inputs, input_types): data = inputs[0] num_chunks = int(inputs[1]) axis = int(inputs[2]) if isinstance(data, _expr.Expr): inferred_shape = self.infer_shape_with_prelude(data) shape = [] for infer in inferred_shape: shape.append(infer) dim = int(shape[axis]) if dim % num_chunks: unif_size = int(dim / (num_chunks - 1)) else: unif_size = int(dim / num_chunks) indeces = [] for i in range(unif_size, dim, unif_size): indeces.append(i) return _op.split(data, indeces, axis) def matmul(self, inputs, input_types): inputs_0 = inputs[0] inputs_1 = inputs[1] # Need to check input shape as batch matmul must be supported. 
a_shape = self.infer_shape_with_prelude(inputs_0) b_shape = self.infer_shape_with_prelude(inputs_1) # When performing a batch matmul, we need to properly handle N-dim shapes. if len(a_shape) > 2 and len(b_shape) > 2: # Convert a into a 3 dimensional tensors. need_reshape_output = False if len(a_shape) != 3: a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]]) need_reshape_output = True else: a = inputs_0 # Transpose matrix dimensions of b. trans_axes = list(range(len(b_shape))) trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2] b = _op.transpose(inputs_1, trans_axes) # Convert b into a 3 dimensional tensor. Note that the last two dimensions # are transposed. if len(b_shape) != 3: b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]]) # Perform a batch matmul. output = _op.nn.batch_matmul(a, b) # Reshape output to original dimensions. if need_reshape_output: return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]]) return output elif len(a_shape) > 2: inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]]) if len(b_shape) > 2: trans_axes = list(range(len(b_shape))) trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2] input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]]) elif len(b_shape) == 2: input_1 = _op.transpose(inputs_1, axes=(1, 0)) elif len(b_shape) == 1: input_1 = _op.expand_dims(inputs_1, 0, 1) out = _op.nn.dense(inputs_0, input_1) if len(b_shape) == 1: out = _op.squeeze(out, axis=[-1]) # Reshape output into a N dimensional tensor when a or b dim > 2 if len(a_shape) > 2: out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]]) elif len(b_shape) > 2: out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]]) out = _op.reshape( _op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]] ) return out def expand(self, inputs, input_types): data_in = inputs[0] shape = list(self.infer_shape(data_in)) ndims = len(shape) sizes = inputs[1] out = data_in out_dims = len(sizes) if ndims < out_dims: num_newaxis = out_dims - ndims out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis) shape = [1] * num_newaxis + shape for i in range(out_dims): if sizes[i] != -1 and shape[i] == 1: if not isinstance(sizes[i], int): sizes[i] = int(_infer_value(sizes[i], {}).numpy()) out = _op.repeat(out, sizes[i], axis=i) return out def int(self, inputs, input_types): if isinstance(inputs[0], _expr.Expr): return inputs[0] return int(inputs[0]) def identity(self, inputs, input_types): return inputs[0] def none(self, inputs, input_types): return None def make_pad(self, mode): def pad(inputs, input_types): data = inputs[0] if isinstance(inputs[1], list): pad_list = inputs[1] else: pad_list = list(self.infer_shape(inputs[1])) # initialize paddings based on input len pad_len = len(self.infer_shape(data)) * 2 paddings = [0] * pad_len if len(pad_list) >= 2: paddings[-1] = pad_list[1] paddings[-2] = pad_list[0] if len(pad_list) >= 4: paddings[-3] = pad_list[3] paddings[-4] = pad_list[2] if len(pad_list) >= 6: paddings[-5] = pad_list[5] paddings[-6] = pad_list[4] # group into tuple of 2 ints paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)] const_paddings = [] non_zero_found = False for pad in paddings: const_paddings.append([]) for p in pad: if not isinstance(p, int): p = int(_infer_value(p, {}).numpy()) const_paddings[-1].append(p) if p != 0: non_zero_found = True if not non_zero_found: return data elif mode == "constant": return _op.nn.pad(data, const_paddings, pad_value=inputs[2], pad_mode=mode) else: return _op.nn.pad(data, 
const_paddings, pad_mode=mode) return pad def clamp(self, inputs, input_types): data = inputs[0] def get_v(v, default_v): if isinstance(v, _expr.Constant): return float(v.data.numpy()) if isinstance(v, _expr.Expr): infer_v, success = try_infer_value(v, lambda ret: float(ret)) if success: return infer_v if v is not None: return v return default_v amin = get_v(inputs[1], np.finfo(np.float32).min) amax = get_v(inputs[2], np.finfo(np.float32).max) return _op.clip(data, amin, amax) def to(self, inputs, input_types): data = inputs[0] dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2] # special handling for aten::to(data, 6, _, _, _) case # 6 means dtype = float # this happens when converting upsampling with scale factor cast_map = { 5: "float16", 6: "float32", 7: "float64", 3: "int32", 4: "int64", } cast_func = {5: float, 6: float, 7: float, 3: int, 4: int} ret = data if isinstance(data, _expr.Expr): actual_dtype = str(self.infer_type(data).dtype) if dtype in cast_map and cast_map[dtype] != actual_dtype: ret = _op.cast(data, cast_map[dtype]) elif dtype in cast_map: ret = cast_func[dtype](data) return ret def get_upsample_out_size(self, inputs, method): # This assumes a static shape out_size = [] if inputs[1] is not None: for size in inputs[1]: if not isinstance(size, int): out_size.append(int(_infer_value(size, {}).numpy())) else: out_size.append(size) else: scale_index = 3 if method != "nearest_neighbor" else 2 scales = inputs[scale_index] assert scales is not None, "neither out size nor scale provided" assert isinstance(scales, list) ishape = self.infer_shape(inputs[0]) for i, scale in enumerate(scales): out_size.append(int(math.floor(float(ishape[2 + i]) * scale))) return out_size def make_upsample(self, method): def upsample(inputs, input_types): data = inputs[0] out_size = self.get_upsample_out_size(inputs, method) if len(inputs) > 2 and method != "nearest_neighbor": align_corners = inputs[2] else: align_corners = False if method == "nearest_neighbor": coord_trans = "asymmetric" elif align_corners: coord_trans = "align_corners" else: coord_trans = "half_pixel" def func(x): return _op.image.resize2d( x, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75 ) if self.is_quantized_tensor(data): # input qparams are manually appended by us assert isinstance(inputs[-2], float) assert isinstance(inputs[-1], int) input_scale = _expr.const(inputs[-2]) input_zero_point = _expr.const(inputs[-1]) return qnn_torch.quantized_upsample(data, input_scale, input_zero_point, func) return func(data) return upsample def make_upsample3d(self, method): def upsample3d(inputs, input_types): data = inputs[0] out_size = self.get_upsample_out_size(inputs, method) if len(inputs) > 2 and method == "linear": align_corners = inputs[2] else: align_corners = False if method == "nearest_neighbor": coord_trans = "asymmetric" elif align_corners: coord_trans = "align_corners" else: coord_trans = "half_pixel" return _op.image.resize3d(data, out_size, None, "NCDHW", method, coord_trans) return upsample3d def expand_as(self, inputs, input_types): target = inputs[1] t0 = self.infer_type(inputs[0]).dtype t1 = self.infer_type(inputs[1]).dtype if str(t0) != str(t1): target = _op.cast(target, t0) return _op.broadcast_to_like(inputs[0], target) def Bool(self, inputs, input_types): assert len(inputs) == 1 return inputs[0] def Float(self, inputs, input_types): assert len(inputs) == 1 return _op.cast(inputs[0], "float32") def bitwise_not(self, inputs, input_types): data = inputs[0] # The 
input tensor must be of integral or Boolean types. # For bool tensors, it computes the logical NOT if input_types[0] == "bool": out = _op.logical_not(_op.cast(data, "bool")) else: out = _op.bitwise_not(_op.cast(data, "int")) return out def bitwise_xor(self, inputs, input_types): lhs = inputs[0] rhs = inputs[1] lhs = _op.cast(lhs, "bool") if input_types[0] == "bool" else _op.cast(lhs, "int") rhs = _op.cast(rhs, "bool") if input_types[1] == "bool" else _op.cast(rhs, "int") return _op.bitwise_xor(lhs, rhs) def logical_not(self, inputs, input_types): data = _wrap_const(inputs[0]) return _op.logical_not(_op.cast(data, "bool")) def logical_xor(self, inputs, input_types): lhs = _op.cast(inputs[0], "bool") rhs = _op.cast(inputs[1], "bool") return _op.logical_xor(lhs, rhs) def list_getitem(self, inputs, input_types): return self.prelude.nth(inputs[0], _wrap_const(inputs[1])) def list_len(self, inputs, input_types): return self.prelude.length(inputs[0]) def type_as(self, inputs, input_types): assert len(inputs) == 2 assert len(input_types) == 2 return _op.cast(inputs[0], input_types[1]) def gather(self, inputs, input_types): data = inputs[0] axis = inputs[1] indices = inputs[2] return _op.gather(data, axis, indices) def add(self, inputs, input_types): # add_ is overloaded for tensor add and list concat if input_types[0] == "ListType": return self.prelude.concat(inputs[0], inputs[1]) return self.make_elemwise("add")(inputs, input_types) def tensor_array_stack(self, inputs, input_types): dim = inputs[1] assert dim == 0, "stacking on a dynamic tensor list only supported on a first axis" tensor_array, shape = self.convert_to_tensor_array(inputs[0]) stacked_shape = (Any(),) + shape stack = self.prelude.get_global_var_static("tensor_array_stack", "float32", shape) stacked = stack(tensor_array) static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", stacked_shape) static_tensor_array_ops.register() get_tensor = self.prelude.get_global_var_static("tensor_get_data", "float32", stacked_shape) return get_tensor(stacked) def stack(self, inputs, input_types): if isinstance(inputs[0], list): # a static python list of tensors dim = inputs[1] return _op.stack(inputs[0], dim) else: # List ADT case assert isinstance(inputs[0], _expr.Expr) ty = self.infer_type_with_prelude(inputs[0]) list_ty = self.prelude.mod.get_global_type_var("List") msg = "The input list is expected to be List ADT" assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg return self.tensor_array_stack(inputs, input_types) def rsub(self, inputs, input_types): data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2]) # TODO (t-vi): should this also be part of the type promotion? alpha = _expr.const(float(inputs[2])) # note: rsub means data0 and data1 swap places return get_relay_op("subtract")(data1, alpha * data0) def embedding(self, inputs, input_types): weight = inputs[0] indices = inputs[1] return _op.take(weight, indices.astype("int32"), axis=0) def one_hot(self, inputs, input_types): indices = inputs[0].astype("int32") num_classes = inputs[1] if num_classes == -1: msg = "Inferring the number of classes is not yet supported." 
raise NotImplementedError(msg) dtype = "int32" on_value = tvm.relay.const(1.0, dtype) off_value = tvm.relay.const(0.0, dtype) return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype) def index(self, inputs, input_types): data = inputs[0] indices = inputs[1] return _op.adv_index([data] + indices) def meshgrid(self, inputs, input_types): data = inputs[0] return _op.meshgrid(data, indexing="ij") def nms(self, inputs, input_types): boxes = inputs[0] scores = inputs[1] iou_threshold = inputs[2] # TVM NMS assumes score > 0 scores = scores - _op.min(scores) + _op.const(1.0) num_boxes = _op.shape_of(scores) # PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count indices = _op.transform.arange(_op.squeeze(num_boxes), dtype="int32") indices = _op.expand_dims(indices, 0, 1) # Generate data with shape (1, num_anchors, 5) scores = AttrCvt(op_name="expand_dims", extras={"axis": -1, "num_newaxis": 1})([scores], {}) data = _op.concatenate([scores, boxes], -1) data = _op.expand_dims(data, 0, 1) # Perform Non-Maximum Suppression, # PyTorch NMS doesn't have parameter top_k and max_output_size score_index = 0 top_k = max_out_size = -1 nms_ret = get_relay_op("non_max_suppression")( data=data, valid_count=num_boxes, indices=indices, max_output_size=max_out_size, iou_threshold=iou_threshold, force_suppress=True, top_k=top_k, coord_start=1, score_index=score_index, id_index=-1, return_indices=True, invalid_to_bottom=False, ) # squeeze the two outputs of nms for strided_slice size = get_relay_op("squeeze")(nms_ret[1], axis=[1]) data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0]) # strided slice to get the dynamic result ret = get_relay_op("strided_slice")( data_slice, begin=_expr.const([0]), end=size, slice_mode="size" ) # in torchvision, indices from nms are int64 return _op.cast(ret, "int64") def logsumexp(self, inputs, input_types): data = self.pytorch_promote_types(inputs[:1], input_types[:1]) dim_list = inputs[1] keepdim = inputs[2] if len(inputs) > 2 else False # dim is output of prim::ListConstruct, even if it is int in python code assert isinstance(dim_list, list), "dim is expected to be a list" return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim) def roi_align(self, inputs, input_types): data = inputs[0] boxes = inputs[1] output_size = (inputs[3], inputs[4]) spatial_scale = inputs[2] sample_ratio = inputs[5] aligned = False if len(inputs) < 7 else inputs[6] if aligned: boxes -= _expr.const(0.5 / spatial_scale) return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio) def deform_conv2d(self, inputs, input_types): data = inputs[0] weight = inputs[1] offset = inputs[2] if len(inputs) > 12: strides_offset = 5 bias = inputs[4] logging.warning("mask argument in deformable conv2d is not supported and ignored") else: strides_offset = 4 bias = inputs[3] strides = (inputs[strides_offset], inputs[strides_offset + 1]) padding = (inputs[strides_offset + 2], inputs[strides_offset + 3]) dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5]) groups = inputs[strides_offset + 6] deformable_groups = inputs[strides_offset + 7] weight_shape = self.infer_shape(weight) output_channels = weight_shape[0] kernel_size = (weight_shape[2], weight_shape[3]) conv_out = _op.nn.deformable_conv2d( data, offset, weight, strides, padding, dilation, deformable_groups, groups, output_channels, kernel_size, ) return _op.nn.bias_add(conv_out, bias) def unbind(self, inputs, input_types): data = inputs[0] axis = int(inputs[1]) return unbind(data, axis) 
def shape_as_tensor(self, inputs, input_types): is_symbolic_shape = False input_shape = self.infer_shape(inputs[0], self.prelude.mod) for axis in input_shape: if not isinstance(axis, (int, tvm.tir.IntImm)): is_symbolic_shape = True break if is_symbolic_shape: ret = _op.shape_of(inputs[0], dtype="int64") else: ret = _expr.const(np.array(input_shape), dtype="int64") return ret def logical_and(self, inputs, input_types): lhs = _op.cast(inputs[0], "bool") rhs = _op.cast(inputs[1], "bool") return _op.logical_and(lhs, rhs) def nonzero(self, inputs, input_types, is_numpy_style=False): data = inputs[0] ret = _op.transform.argwhere(data) if is_numpy_style or (len(inputs) > 1 and inputs[1]): return unbind(ret, 1) return ret def nonzero_numpy(self, inputs, input_types): return self.nonzero(inputs, input_types, is_numpy_style=False) def scatter(self, inputs, input_types): data = inputs[0] axis = int(inputs[1]) index = inputs[2] src = inputs[3] return _op.transform.scatter(data, index, src, axis) def index_put(self, inputs, input_types): in_tensor = inputs[0] indices = inputs[1] values = inputs[2] accumulate = inputs[3] if not accumulate: mode = "update" else: mode = "add" # Combine array of index tensors into one index tensor with shape (N,_) index_tensor = _op.stack(indices, axis=0) return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode) def scalar_tensor(self, inputs, input_types): data = inputs[0] cast_map = { 6: "float32", 7: "float64", 3: "int32", 4: "int64", } type_key = inputs[1] if isinstance(data, _expr.Constant): data = data.data.numpy().tolist() return _expr.const(data, cast_map[type_key]) def interpolate(self, inputs, input_types): if isinstance(inputs[1], _expr.Expr): out_size = inputs[1] elif isinstance(inputs[1], list): out_size = [] for i in [0, 1]: size, _ = try_infer_value( inputs[1][i], lambda ret: ret.astype(np.int), lambda: _op.expand_dims(inputs[1][i], axis=0), ) out_size.append(size) out_size = _op.concatenate(out_size, axis=0) data = inputs[0] align_corners = inputs[4] method = inputs[3] if method.startswith("nearest"): method = "nearest_neighbor" elif method[0:2] == "bi": method = method[2:] if method == "nearest_neighbor": coord_trans = "asymmetric" elif align_corners: coord_trans = "align_corners" else: coord_trans = "half_pixel" return _op.image.resize2d( data, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75 ) def numel(self, inputs, input_types): return _op.ndarray_size(inputs[0]) def empty(self, inputs, input_types): shape = inputs[0] return _op.zeros(shape, _convert_dtype_value(inputs[1])) def bincount(self, inputs, input_types): data = inputs[0] weights = inputs[1] input_type = self.infer_type(data).dtype if input_type == "int64": logger.warning( "Casting an int64 input to int32, since we do not have int64 atomic add" "needed for bincount yet." 
) data = _op.cast(data, "int32") maximum = _op.max(data) dim = maximum + _expr.const(1, dtype="int32") if weights: weight_type = self.infer_type(weights) out_dtype = weight_type.dtype updates = weights else: out_dtype = "int32" updates = _op.ones_like(data) counts = _op.zeros(_op.reshape(dim, [1]), out_dtype) out = _op.scatter_add(counts, data, updates, axis=0) if input_type == "int32": # Torch always outputs int64 results for bincount return _op.cast(out, "int64") return out def scatter_add(self, inputs, input_types): data = inputs[0] axis = inputs[1] index = inputs[2] src = inputs[3] return _op.scatter_add(data, index, src, axis=axis) def cumsum(self, inputs, input_types): data = inputs[0] dim = inputs[1] dtype = inputs[2] if inputs[2] is not None: dtype = _convert_dtype_value(inputs[2]) return _op.cumsum(data, axis=dim, dtype=dtype) def masked_fill(self, inputs, input_types): mask = inputs[1] value = _op.cast(_wrap_const(inputs[2]), input_types[0]) return _op.where(mask, value, inputs[0]) def masked_select(self, inputs, input_types): mask = inputs[1] indices = self.nonzero([mask], input_types, is_numpy_style=True) return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)]) def sort(self, inputs, input_types): data = inputs[0] dim = inputs[1] is_descending = inputs[2] # pytorch sort returns both sorted indices and values indices = _op.argsort(data, dim, not is_descending) return _op.gather(data, dim, indices), indices def argsort(self, inputs, input_types): data = inputs[0] dim = inputs[1] is_descending = inputs[2] return _op.argsort(data, dim, not is_descending) def is_floating_point(self, inputs, input_types): assert len(inputs) == 1 if isinstance(inputs[0], _expr.Expr): input_type = self.infer_type(inputs[0]).dtype else: input_type = input_types[0] is_float = input_type in ["float32", "float64", "float16", "bfloat16"] return _expr.const(is_float) def unique(self, inputs, input_types): assert len(inputs) == 4 [data, is_sorted, return_inverse, return_counts] = inputs if not is_sorted: logger.warning("TVM always assumes sorted=True for torch.unique") is_sorted = True if return_counts: [unique, indices, inverse_indices, num_uniq, counts] = _op.unique( data, is_sorted=is_sorted, return_counts=True ) unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size") counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size") return (unique_sliced, inverse_indices, counts_sliced) else: [unique, indices, inverse_indices, num_uniq] = _op.unique( data, is_sorted=is_sorted, return_counts=False ) unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size") return (unique_sliced, inverse_indices) def nll_loss(self, inputs, input_types): assert len(inputs) == 5 [predictions, targets, weights, reduction, ignore_index] = inputs num_class = self.infer_shape(predictions)[1] if reduction == 0: reduction = "none" elif reduction == 1: reduction = "mean" else: reduction = "sum" if weights is None: weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0]) return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index) def flip(self, inputs, input_types): data = inputs[0] axis = inputs[1] return _op.transform.reverse(data, axis=axis[0]) def bidir_gru_cell( self, input_seqs, weights_dicts, ): """ Bidirectional GRU cell """ seq_len = len(input_seqs) forward_outputs, fw_H_t = gru_cell( input_seqs, **weights_dicts[0], ) reverse_outputs, rev_H_t = gru_cell( input_seqs, **weights_dicts[1], 
backwards=True, ) final_outputs = [] for i in range(seq_len): final_outputs.append( _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1) ) return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0) def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0): """ Methods iterates layers for Stacked GRU """ layers_num = len(layer_weights_dicts) # split input sequence to samples set input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)] output_hiddens = [] for i in range(layers_num): weights_dicts = layer_weights_dicts[i] # input_seqs shape = [seq_num, (batch, feature_size)] or # [seq_num, (batch, 2*feature_size)] for bidirectional if bidirectional: input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts) else: input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0]) output_hiddens.append(H_t) # TODO (vvchernov): in pytorch implementation train is also checked # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339 # /aten/src/ATen/native/RNN.cpp#L1054 if dropout_p != 0 and i < layers_num - 1: # for input in input_seqs: # input = _op.dropout(input, dropout_p) raise NotImplementedError("Dropout for GRU has not been supported yet!") return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0) def gru(self, inputs, input_types): """ Description of GRU in pytorch: https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU """ # TODO (vvchernov): support dropout assert len(inputs) == 9, "Input of size 9 is expected" # Unpack inputs, note that if optional and not provided then value will be None. _X = inputs[0] # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size) hidden_state = inputs[1] # Hidden state shape (hidden_layers_num, batch, hidden_size) _weights = inputs[2] # Wi layer[0] shape (3 * hidden_size, feature_size) # Wh layer[0] shape (3 * hidden_size, hidden_size) # Bi layer[0] shape (3 * hidden_size) # Bh layer[0] shape (3 * hidden_size) # Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions) # Wh layer[>0] shape (3 * hidden_size, hidden_size) # Bi layer[>0] shape (3 * hidden_size) # Bh layer[>0] shape (3 * hidden_size) # Scalar inputs has_biases = inputs[3] num_layers = inputs[4] dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout # train = inputs[6] bidirectional = inputs[7] batch_first = inputs[8] num_directions = 1 if bidirectional: num_directions = 2 rsd = len(_weights) % num_layers assert rsd == 0, "The number of weights must be a multiple of the number of layers!" rsd = (len(_weights) / num_layers) % num_directions assert ( rsd == 0 ), "The number of weights in layer must be a multiple of the number of directions!" weights_num = int(len(_weights) / num_layers / num_directions) if has_biases: assert weights_num == 4, "The weights number in layer is expected equal to 4" else: assert weights_num == 2, "The weights number in layer is expected equal to 2" X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X # TODO (vvchernov): Which data type should be used? from input or weights? # Instead of it _infer_type(X).checked_type.dtype can be used X_dtype = input_types[0] X_shape = _infer_shape(X) # (seq_num, batch, feature_size) hidden_size = int(_infer_shape(_weights[0])[0] / 3) batch_size = X_shape[1] # Initialize hidden states if not provided. 
layers_h = [] hidden_layers_num = num_directions * num_layers if hidden_state is None: h_0 = _op.zeros((batch_size, hidden_size), X_dtype) for i in range(hidden_layers_num): layers_h.append(h_0) else: layers_h = unbind(hidden_state, 0) layer_weights_dicts = [] k = 0 # layer counter if has_biases: names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"] if bidirectional: rsd = len(_weights) % (2 * weights_num) assert rsd == 0, "got an incorrect number of GRU weights" for i in range(0, len(_weights), 2 * weights_num): fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]] fw_weights_dict = dict(zip(names, fw_tensors)) j = i + weights_num rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]] rev_weights_dict = dict(zip(names, rev_tensors)) layer_weights_dicts.append([fw_weights_dict, rev_weights_dict]) k += 1 else: assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights" for i in range(0, len(_weights), weights_num): fw_tensors = [layers_h[k], *_weights[i : i + 4]] fw_weights_dict = dict(zip(names, fw_tensors)) layer_weights_dicts.append([fw_weights_dict]) k += 1 else: names = ["hidden_state", "w_inp", "w_hid"] if bidirectional: rsd = len(_weights) % (2 * weights_num) assert rsd == 0, "got an incorrect number of GRU weights" for i in range(0, len(_weights), 2 * weights_num): fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]] fw_weights_dict = dict(zip(names, fw_tensors)) j = i + weights_num rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]] rev_weights_dict = dict(zip(names, rev_tensors)) layer_weights_dicts.append([fw_weights_dict, rev_weights_dict]) k += 1 else: assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights" for i in range(0, len(_weights), weights_num): fw_tensors = [layers_h[k], *_weights[i : i + 2]] fw_weights_dict = dict(zip(names, fw_tensors)) layer_weights_dicts.append([fw_weights_dict]) k += 1 assert ( len(layer_weights_dicts) == num_layers and k == num_layers ), "For stacked GRU number of weights sets should be the same as number of layers!" 
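# At this point layer_weights_dicts holds one entry per GRU layer: a single
# forward dict for a unidirectional layer, or a [forward, reverse] pair for a
# bidirectional one, each keyed by `names` above and unpacked into
# gru_cell / bidir_gru_cell inside gru_layers below.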
output, out_hidden_state = self.gru_layers( X, layer_weights_dicts, bidirectional, dropout_p=dropout_p, ) # output shape = (seq_num, batch, hidden_size) or # (seq_num, batch, 2*feature_size) for bidirectional if batch_first: output = _op.transpose(output, (1, 0, 2)) return (output, out_hidden_state) def bidir_lstm_cell( self, input_seqs, weights_dicts, ): """ Bidirectional LSTM cell """ seq_len = len(input_seqs) forward_outputs, fw_H_t, fw_C_t = lstm_cell( input_seqs, **weights_dicts[0], ) reverse_outputs, rev_H_t, rev_C_t = lstm_cell( input_seqs, **weights_dicts[1], backwards=True, ) final_outputs = [] for i in range(seq_len): final_outputs.append( _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1) ) return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t) def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0): """ Methods iterates layers for Stacked LSTM """ layers_num = len(layer_weights_dicts) # split input sequence to samples set input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)] output_hiddens = [] for i in range(layers_num): weights_dicts = layer_weights_dicts[i] # input_seqs shape = [seq_num, (batch, feature_size)] or # [seq_num, (batch, 2*feature_size)] for bidirectional if bidirectional: input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts) else: input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0]) output_hiddens.append((H_t, C_t)) # TODO (vvchernov): in pytorch implementation train is also checked # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339 # /aten/src/ATen/native/RNN.cpp#L1054 if dropout_p != 0 and i < layers_num - 1: # for input in input_seqs: # input = _op.dropout(input, dropout_p) raise NotImplementedError("Dropout for LSTM has not been supported yet!") final_hiddens = [] if bidirectional: for output_hidden in output_hiddens: final_hiddens.append(output_hidden[0]) final_hiddens.append(output_hidden[1]) else: final_hiddens = output_hiddens return _op.stack(input_seqs, 0), final_hiddens def lstm(self, inputs, input_types): """ Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html Native implementation for torch version less than 1.8.0 (projection is unsupported): https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \ src/ATen/native/RNN.cpp#L1396 Native implementation for torch version from 1.8.0 and higher (projection is supported): https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483 """ # TODO (vvchernov): support dropout assert len(inputs) == 9, "Input of size 9 is expected" # Unpack inputs, note that if optional and not provided then value will be None. 
_X = inputs[0] # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size) hidden_states = inputs[1] assert len(hidden_states) == 2, "lstm expects two hidden states" h_0 = hidden_states[0] c_0 = hidden_states[1] # H0 shape (hidden_layers_num, batch, proj_size) if projection # else (hidden_layers_num, batch, hidden_size) # C0 shape (hidden_layers_num, batch, hidden_size) _weights = inputs[2] # If no projection # Wi layer[0] shape (4 * hidden_size, feature_size) # Wh layer[0] shape (4 * hidden_size, hidden_size) # Bi layer[0] shape (4 * hidden_size) # Bh layer[0] shape (4 * hidden_size) # Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions) # Wh layer[>0] shape (4 * hidden_size, hidden_size) # Bi layer[>0] shape (4 * hidden_size) # Bh layer[>0] shape (4 * hidden_size) # If projection # Wi layer[0] shape (4 * hidden_size, feature_size) # Wh layer[0] shape (4 * hidden_size, proj_size) # Bi layer[0] shape (4 * hidden_size) # Bh layer[0] shape (4 * hidden_size) # P layer[0] shape (proj_size, hidden_size) # Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions) # Wh layer[>0] shape (4 * hidden_size, proj_size) # Bi layer[>0] shape (4 * hidden_size) # Bh layer[>0] shape (4 * hidden_size) # P layer[>0] shape (proj_size, hidden_size) # Scalar inputs has_biases = inputs[3] num_layers = inputs[4] dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout # train = inputs[6] bidirectional = inputs[7] batch_first = inputs[8] num_directions = 1 if bidirectional: num_directions = 2 rsd = len(_weights) % num_layers assert rsd == 0, "The number of weights must be a multiple of the number of layers!" rsd = (len(_weights) / num_layers) % num_directions assert ( rsd == 0 ), "The number of weights in layer must be a multiple of the number of directions!" has_proj = False proj_size = 0 weights_num = int(len(_weights) / num_layers / num_directions) if has_biases: if weights_num == 5: has_proj = True proj_size = _infer_shape(_weights[4])[0] else: assert weights_num == 4, "The weights number in layer is expected equal to 4" else: if weights_num == 3: has_proj = True proj_size = _infer_shape(_weights[2])[0] else: assert weights_num == 2, "The weights number in layer is expected equal to 2" X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X # TODO (vvchernov): Which data type should be used? from input or weights? # Instead of it _infer_type(X).checked_type.dtype can be used X_dtype = input_types[0] X_shape = _infer_shape(X) # (seq_num, batch, feature_size) hidden_size = _infer_shape(_weights[0])[0] / 4 batch_size = X_shape[1] # Initialize hidden states if not provided. 
layers_h = [] layers_c = [] hidden_layers_num = num_directions * num_layers if h_0 is None: if has_proj: h_0 = _op.zeros((batch_size, proj_size), X_dtype) else: h_0 = _op.zeros((batch_size, hidden_size), X_dtype) for i in range(hidden_layers_num): layers_h.append(h_0) else: layers_h = unbind(h_0, 0) if c_0 is None: c_0 = _op.zeros((batch_size, hidden_size), X_dtype) for i in range(hidden_layers_num): layers_c.append(c_0) else: layers_c = unbind(c_0, 0) layer_weights_dicts = [] k = 0 # layer counter if has_biases: names = ["hidden_state", "cell_state", "w_inp", "w_hid", "b_inp", "b_hid"] if bidirectional: rsd = len(_weights) % (2 * weights_num) assert rsd == 0, "got an incorrect number of LSTM weights" for i in range(0, len(_weights), 2 * weights_num): fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]] fw_weights_dict = dict(zip(names, fw_tensors)) if has_proj: fw_weights_dict["proj"] = _weights[i + 4] j = i + weights_num rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]] rev_weights_dict = dict(zip(names, rev_tensors)) if has_proj: rev_weights_dict["proj"] = _weights[j + 4] layer_weights_dicts.append([fw_weights_dict, rev_weights_dict]) k += 1 else: assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights" for i in range(0, len(_weights), weights_num): fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]] fw_weights_dict = dict(zip(names, fw_tensors)) if has_proj: fw_weights_dict["proj"] = _weights[i + 4] layer_weights_dicts.append([fw_weights_dict]) k += 1 else: names = ["hidden_state", "cell_state", "w_inp", "w_hid"] if bidirectional: rsd = len(_weights) % (2 * weights_num) assert rsd == 0, "got an incorrect number of LSTM weights" for i in range(0, len(_weights), 2 * weights_num): fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]] fw_weights_dict = dict(zip(names, fw_tensors)) if has_proj: fw_weights_dict["proj"] = _weights[i + 2] j = i + weights_num rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]] rev_weights_dict = dict(zip(names, rev_tensors)) if has_proj: rev_weights_dict["proj"] = _weights[j + 2] layer_weights_dicts.append([fw_weights_dict, rev_weights_dict]) k += 1 else: assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights" for i in range(0, len(_weights), weights_num): fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]] fw_weights_dict = dict(zip(names, fw_tensors)) if has_proj: fw_weights_dict["proj"] = _weights[i + 2] layer_weights_dicts.append([fw_weights_dict]) k += 1 assert ( len(layer_weights_dicts) == num_layers and k == num_layers ), "For stacked LSTM number of weights sets should be the same as number of layers!" outputs = self.lstm_layers( X, layer_weights_dicts, bidirectional, dtype=X_dtype, dropout_p=dropout_p, ) # output shape = (seq_num, batch, hidden_size) or # (seq_num, batch, 2*feature_size) for bidirectional output = outputs[0] hy = [] cy = [] for hidden in outputs[1]: hy.append(hidden[0]) cy.append(hidden[1]) if batch_first: output = _op.transpose(output, (1, 0, 2)) return (output, _op.stack(hy, 0), _op.stack(cy, 0)) def all_any_common(self, op, inputs, input_types): dim = inputs[1] keepdim = inputs[2] if self.infer_type(inputs[0]).dtype != "bool": # The input dtype can be uint8. 
inp = _op.cast(inputs[0], "bool") else: inp = inputs[0] return op(inp, axis=dim, keepdims=keepdim) def searchsorted_common(self, sorted_sequence, values, out_int32, right): dtype = "int32" if out_int32 else "int64" values_shape = _infer_shape(values) if len(values_shape) == 0: values = _op.expand_dims(values, 0) out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype) if len(values_shape) == 0: return _op.squeeze(out) return out def searchsorted(self, inputs, input_types): return self.searchsorted_common(*inputs) def bucketize(self, inputs, input_types): return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3]) def roll(self, inputs, input_types): def slide_axes(inp, shape, ax): axes = list(range(len(shape))) axes = axes[:ax] + [-1] + axes[ax:-1] return _op.transpose(inp, axes) x = inputs[0] shifts = inputs[1] dims = inputs[2] shape = self.infer_shape(x) start = _expr.const(0, "int64") step = _expr.const(1, "int64") out = x for i, dim in enumerate(dims): roll_dim = _expr.const(shape[dim], "int64") indices_1d = _op.mod( _op.transform.arange(start, roll_dim, step, "int64") - _expr.const(shifts[i], "int64") + roll_dim, roll_dim, ) # First fill in the last axis with roll indices, and then do transpose to # bring the roll indices into the desired axis. indices = slide_axes( _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)), shape, dim, ) out = _op.gather(out, dim, indices) return out def einsum(self, inputs, input_types): equation, data = inputs return _op.einsum(data, equation) # Operator mappings def create_convert_map(self): self.convert_map = { "aten::is_floating_point": self.is_floating_point, "aten::pixel_shuffle": self.pixel_shuffle, "aten::device": self.none, "prim::device": self.none, "aten::sub": self.make_elemwise("subtract"), "aten::max": self.max, "aten::min": self.min, "aten::mul": self.make_elemwise("multiply"), "aten::pow": self.make_elemwise("power"), "aten::arange": self.arange, "aten::meshgrid": self.meshgrid, "aten::div": self.make_elemwise("divide"), "aten::floor_divide": self.make_elemwise("floor_divide"), "aten::true_divide": self.make_elemwise("divide"), "aten::addcdiv": self.addcdiv, "aten::addcmul": self.addcmul, "aten::ones": self.ones, "aten::ones_like": self.ones_like, "aten::zeros": self.zeros, "aten::zeros_like": self.zeros_like, "aten::full": self.full, "aten::full_like": self.full_like, "aten::linspace": self.linspace, "aten::reciprocal": self.reciprocal, "aten::repeat": self.repeat, "aten::repeat_interleave": self.repeat_interleave, "aten::to": self.to, "aten::squeeze": self.squeeze, "aten::unsqueeze": self.unsqueeze, "aten::cat": self.concatenate, "aten::slice": self.slice, "aten::narrow": self.narrow, "aten::split": self.split, "aten::split_with_sizes": self.split_with_sizes, "aten::select": self.select, "aten::take": self.take, "aten::where": self.where, "aten::topk": self.topk, "aten::relu": self.relu, "aten::prelu": self.prelu, "aten::leaky_relu": self.leaky_relu, "aten::elu": self.elu, "aten::celu": self.celu, "aten::gelu": self.gelu, "aten::selu": self.selu, "aten::silu": self.silu, "aten::log_sigmoid": self.log_sigmoid, "aten::adaptive_avg_pool1d": functools.partial( self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d ), "aten::adaptive_avg_pool2d": functools.partial( self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d ), "aten::adaptive_avg_pool3d": functools.partial( self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d ), "aten::adaptive_max_pool1d": functools.partial( self.adaptive_max_pool, 
_op.nn.adaptive_max_pool1d ), "aten::adaptive_max_pool2d": functools.partial( self.adaptive_max_pool, _op.nn.adaptive_max_pool2d ), "aten::adaptive_max_pool3d": functools.partial( self.adaptive_max_pool, _op.nn.adaptive_max_pool3d ), "aten::max_pool2d": self.maxpool_2d, "aten::max_pool2d_with_indices": self.maxpool_2d_with_indices, "aten::max_pool1d": self.maxpool_1d, "aten::max_pool3d": self.maxpool_3d, "aten::hardtanh": self.hardtanh, "aten::_convolution": self.convolution, "aten::softmax": self.softmax, "aten::threshold": self.threshold, "aten::contiguous": self.contiguous, "aten::batch_norm": self.batch_norm, "aten::instance_norm": self.instance_norm, "aten::layer_norm": self.layer_norm, "aten::group_norm": self.group_norm, "aten::transpose": self.transpose, "aten::t": self.transpose, "aten::flatten": self.flatten, "aten::addmm": self.addmm, "aten::size": self.size, "aten::view": self.view, "aten::reshape": self.reshape, "aten::clone": self.clone, "aten::log_softmax": self.log_softmax, "aten::sigmoid": self.sigmoid, "aten::softplus": self.softplus, "aten::avg_pool1d": self.make_avg_pool(1), "aten::avg_pool2d": self.make_avg_pool(2), "aten::avg_pool3d": self.make_avg_pool(3), "aten::linear": self.linear, "aten::dropout": self.dropout, "aten::feature_dropout": self.dropout, "aten::alpha_dropout": self.dropout, "aten::mean": self.mean, "aten::chunk": self.chunk, "aten::unsafe_chunk": self.chunk, "aten::matmul": self.matmul, "aten::bmm": self.matmul, "aten::expand": self.expand, "aten::Int": self.int, "prim::NumToTensor": self.numtotensor, "prim::ImplicitTensorToNum": self.tensortonum, "aten::ScalarImplicit": self.tensortonum, "aten::constant_pad_nd": self.make_pad("constant"), "aten::reflection_pad1d": self.make_pad("reflect"), "aten::reflection_pad2d": self.make_pad("reflect"), "aten::replication_pad1d": self.make_pad("edge"), "aten::replication_pad2d": self.make_pad("edge"), "aten::replication_pad3d": self.make_pad("edge"), "aten::permute": self.transpose, "aten::sum": self.make_reduce("sum"), "aten::prod": self.make_reduce("prod"), "aten::argmin": self.make_reduce("argmin"), "aten::argmax": self.make_reduce("argmax"), "aten::norm": self.norm, "aten::frobenius_norm": self.frobenius_norm, "aten::std": self.std, "aten::var": self.variance, "aten::abs": self.make_unary("abs"), "aten::neg": self.make_unary("negative"), "aten::cos": self.make_unary("cos"), "aten::cosh": self.make_unary("cosh"), "aten::sin": self.make_unary("sin"), "aten::sinh": self.make_unary("sinh"), "aten::tan": self.make_unary("tan"), "aten::tanh": self.make_unary("tanh"), "aten::acos": self.make_unary("acos"), "aten::asin": self.make_unary("asin"), "aten::atan": self.make_unary("atan"), "aten::log": self.make_unary("log"), "aten::log2": self.make_unary("log2"), "aten::log10": self.make_unary("log10"), "aten::log1p": self.log1p, "aten::exp": self.make_unary("exp"), "aten::erf": self.make_unary("erf"), "aten::trunc": self.make_unary("trunc"), "aten::sign": self.make_unary("sign"), "aten::sqrt": self.make_unary("sqrt"), "aten::rsqrt": self.make_unary("rsqrt"), "aten::ceil": self.make_unary("ceil"), "aten::floor": self.make_unary("floor"), "aten::round": self.make_unary("round"), "aten::isfinite": self.make_unary("isfinite"), "aten::isinf": self.make_unary("isinf"), "aten::isnan": self.make_unary("isnan"), "aten::clamp": self.clamp, "aten::detach": self.identity, "aten::upsample_bilinear2d": self.make_upsample("linear"), "aten::upsample_bicubic2d": self.make_upsample("cubic"), "aten::upsample_nearest2d": 
self.make_upsample("nearest_neighbor"), "aten::upsample_trilinear3d": self.make_upsample3d("linear"), "aten::upsample_nearest3d": self.make_upsample3d("nearest_neighbor"), "aten::expand_as": self.expand_as, "aten::lt": self.make_elemwise("less"), "aten::gt": self.make_elemwise("greater"), "aten::le": self.make_elemwise("less_equal"), "aten::ge": self.make_elemwise("greater_equal"), "aten::ne": self.make_elemwise("not_equal"), "aten::eq": self.make_elemwise("equal"), "aten::logical_not": self.logical_not, "aten::logical_xor": self.logical_xor, "aten::bitwise_not": self.bitwise_not, "aten::bitwise_xor": self.bitwise_xor, "aten::Bool": self.Bool, "aten::Float": self.Float, "aten::rsub": self.rsub, "aten::embedding": self.embedding, "aten::one_hot": self.one_hot, "aten::mm": self.matmul, "aten::add": self.add, "aten::stack": self.stack, "aten::__getitem__": self.list_getitem, "aten::len": self.list_len, "aten::type_as": self.type_as, "aten::gather": self.gather, "aten::index_select": self.select, "aten::index": self.index, "torchvision::nms": self.nms, "aten::logsumexp": self.logsumexp, "torchvision::roi_align": self.roi_align, "torchvision::deform_conv2d": self.deform_conv2d, "aten::unbind": self.unbind, "aten::__and__": self.logical_and, "aten::logical_and": self.logical_and, "aten::_shape_as_tensor": self.shape_as_tensor, "aten::nonzero": self.nonzero, "aten::nonzero_numpy": self.nonzero_numpy, "aten::scatter": self.scatter, "aten::index_put": self.index_put, "aten::scalar_tensor": self.scalar_tensor, "aten::__interpolate": self.interpolate, "aten::IntImplicit": self.identity, "aten::tensor": self.identity, # used for example in tensor(1.0) "aten::numel": self.numel, "aten::empty": self.empty, "aten::bincount": self.bincount, "aten::scatter_add": self.scatter_add, "aten::__not__": self.logical_not, "aten::hardswish": self.hard_swish, "aten::hardsigmoid": self.hard_sigmoid, "aten::cumsum": self.cumsum, "aten::masked_fill": self.masked_fill, "aten::masked_select": self.masked_select, "aten::argsort": self.argsort, "aten::sort": self.sort, "aten::_unique2": self.unique, "aten::nll_loss": self.nll_loss, "aten::nll_loss2d": self.nll_loss, "aten::nll_loss_nd": self.nll_loss, "aten::flip": self.flip, "aten::gru": self.gru, "aten::lstm": self.lstm, "aten::all": functools.partial(self.all_any_common, _op.all), "aten::any": functools.partial(self.all_any_common, _op.any), "aten::searchsorted": self.searchsorted, "aten::bucketize": self.bucketize, "aten::roll": self.roll, "aten::einsum": self.einsum, } def update_convert_map(self, custom_map): self.convert_map.update(custom_map) def report_missing_conversion(self, op_names): """Check if all ops in an input graph are supported by TVM""" known_ops = [ "prim::Constant", "prim::GetAttr", "prim::ListConstruct", "prim::ListUnpack", "prim::TupleConstruct", "prim::TupleUnpack", "prim::RaiseException", "prim::If", "prim::Loop", ] known_ops += list(self.convert_map.keys()) known_ops += list(qnn_torch.convert_map.keys()) missing = [] for op_name in op_names: # Also take care of in-place variant ops like aten::relu_ if op_name not in known_ops and not ( op_name.endswith("_") and op_name[:-1] in known_ops ): missing.append(op_name) if missing: msg = "The following operators are not implemented: {}".format(missing) raise NotImplementedError(msg) def convert_block(self, block, outputs): """Translate Torch "Block", used for prim::If and prim::Loop""" ops = _get_operator_nodes(block.nodes()) ret_names = _get_input_names(block.returnNode()) return 
self.convert_operators(ops, outputs, ret_names) def convert_if(self, if_node, outputs): """Translate Torch prim::If to Relay If""" cond = outputs[if_node.inputsAt(0).debugName()] blocks = list(if_node.blocks()) true_branch = self.convert_block(blocks[0], outputs) false_branch = self.convert_block(blocks[1], outputs) assert len(true_branch) == 1 and len(false_branch) == 1 return _expr.If(cond, true_branch[0], false_branch[0]) def convert_loop(self, loop_node, outputs): """Translate Torch prim::Loop to Relay while_loop""" def get_input(index): ivalue = loop_node.inputsAt(index) inode = ivalue.node() if inode.kind() == "prim::Constant": return _expr.const(_get_constant(inode)) var_name = ivalue.debugName() assert var_name in outputs return _wrap_const(outputs[var_name]) # Refer to the spec for prim::Loop below # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops # The first input: %max_trip_count # The second input: %initial_condition # The rest of input: loop variables max_loop_count = get_input(0) init_cond = get_input(1) num_loop_var = len(list(loop_node.inputs())) - 2 init_vals = [get_input(i + 2) for i in range(num_loop_var)] # while loop has always max_loop_count being int64 max # max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again is_while_loop = ( isinstance(max_loop_count, _expr.Constant) and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize ) if is_while_loop: loop_iter_dtype = "bool" # while loop with non input dependent condition such as while i < 10: # init_cond is int, need to cast to bool to type check if isinstance(init_cond, _expr.Constant): init_cond = _op.cast(init_cond, "bool") init_loop_iter_val = init_cond else: loop_iter_dtype = "int32" # always count from 0 init_loop_iter_val = _expr.const(0, dtype="int32") body_block = list(loop_node.blocks())[0] block_input_names = _get_input_names(body_block) num_block_inputs = len(block_input_names) name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals)) outputs.update(name_val_pairs) def get_var(name, val): if val: checked_type = self.infer_type_with_prelude(val) if hasattr(checked_type, "shape"): shape = get_const_tuple(checked_type.shape) actual_shape = [] for dim in shape: if isinstance(dim, int) and dim == 0: actual_shape.append(Any()) else: actual_shape.append(dim) return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype) else: return _expr.var(name, type_annotation=checked_type) return _expr.var(name) loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype) loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]] # Add non constant free variables to loop variables to prevent code blow up # Without this, if there are two for loops in a row, which often happens # if the outer loop is unrolled, the computation corresponding to the first for loop # is inlined inside loop body, turning O(N) + O(N) computation into O(N^2). # This issue was found when converting from Stacked LSTM test. Torch does not add the # outputof the eariler loop into loop variables of the next loop. # So the variable corresponding to the first loop output appears free in the second # loop body. 
free_vars = [ var for var in _get_free_vars_from_block(body_block) if var in outputs and not isinstance(outputs[var], (_expr.Constant, int, float, str)) and outputs[var] ] prev_outputs = {} for name in free_vars: prev_output = outputs[name] new_loop_var = get_var(name, prev_output) prev_outputs[name] = prev_output outputs[name] = new_loop_var loop_vars.append(new_loop_var) init_vals.append(prev_output) def cond(*current_vals): i = current_vals[0] if is_while_loop: return _op.equal(i, _expr.const(True, "bool")) return _op.less(i, max_loop_count) def body(*current_vals): # Update loop variables using the prev iteration outputs assert len(current_vals) == num_block_inputs + len(free_vars) for (i, val) in enumerate(current_vals): if i < num_block_inputs: outputs[block_input_names[i]] = val else: outputs[free_vars[i - num_block_inputs]] = val block_outputs = self.convert_block(body_block, outputs) block_outputs += [outputs[name] for name in free_vars] if not is_while_loop: # iter var increment implicit in torch, so do it manually # for while loop, block_outputs[0] is already a boolean, # the result of termination check incr = _expr.const(1, dtype="int32") block_outputs[0] = current_vals[0] + incr return block_outputs loop = while_loop(cond, [loop_iter_var] + loop_vars, body) loop_val = loop(init_loop_iter_val, *init_vals) # restore original output values for free vars outputs.update(prev_outputs) # The first element is a loop counter or boolean condition, ignore it return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)] def convert_operators(self, operators, outputs, ret_names): """Convert each Torch IR operators to Relay equivalent""" # an op node might not belong to any of scope in trace info natively # use a cunter to prevent from messing up its scope in span empty_counter = 0 for node_name, op_node in operators: operator = op_node.kind() inputs = _get_op_inputs(op_node, outputs) if operator == "prim::Constant": outputs[node_name] = _get_constant(op_node) elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node): outputs[node_name] = self.convert_to_list_adt(inputs) elif operator == "prim::ListConstruct": # This assumes that no more elements will be appended to this list # In this case, we keep the Python list outputs[node_name] = inputs elif operator == "prim::TupleConstruct": outputs[node_name] = _expr.Tuple(inputs) elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]: assert len(inputs) == 1 if isinstance(inputs[0], (list, _expr.TupleWrapper)): unpacked = inputs[0] else: unpacked = _unpack_tuple(inputs[0]) outputs.update(zip(_get_output_names(op_node), unpacked)) elif operator == "prim::prim::RaiseException": logger.warning("raising exceptions is ignored") outputs[node_name] = None elif operator == "prim::If": if_out = self.convert_if(op_node, outputs) outputs[node_name] = if_out elif operator == "prim::Loop": loop_out = self.convert_loop(op_node, outputs) unpacked_names = _get_output_names(op_node) assert len(loop_out) == len(unpacked_names) outputs.update(zip(unpacked_names, loop_out)) else: if operator not in self.convert_map: # At this point, the only possible ops that are not in convert_map are # in-place variant of ops like aten::relu_ assert operator.endswith("_") logger.warning( "An in-place op %s found, the result will not be correct " "if the model depends on side-effects by this op.", operator, ) relay_op = self.convert_map[operator[:-1]] else: relay_op = self.convert_map[operator] relay_out = relay_op( inputs, 
_get_input_types(op_node, outputs, default_dtype=self.default_dtype) ) span_str, empty_counter = self._get_torch_span(op_node, empty_counter) relay_out = set_span(relay_out, span_str) self.record_output_type(relay_out) if isinstance(relay_out, tuple): # This is for torch operators that return multiple outputs # See _adaptive_max_2d above for example out_names = _get_output_names(op_node) outputs.update(zip(out_names, relay_out)) else: assert op_node.outputsSize() == 1 outputs[node_name] = relay_out return [_wrap_const(outputs[ret_name]) for ret_name in ret_names] def _get_torch_span(self, node, empty_counter): # torch span looks like # %input.5 : Float(...) = aten::relu_(%input.3), scope: __module.relu # ${torch}/nn file # the scope part might not exist if node.scopeName(): scope_name_str = "jit._trace.TopLevelTracedModule: " + node.scopeName() else: scope_name_str = "warning: no trace info " + str(empty_counter) empty_counter += 1 span_str = "C.graph: {}, {}".format(node.kind(), scope_name_str) return span_str, empty_counter def _pytorch_result_type(dtypes, non_tensor_inputs): """This promotes TVM dtypes like PyTorch would""" import torch dtype_map = { "float64": torch.float64, "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, "int64": torch.int64, "int32": torch.int32, "int16": torch.int16, "int8": torch.int8, "uint8": torch.uint8, "bool": torch.bool, } if len(dtypes) > 0: result_type = dtypes[0] for dt in dtypes[1:]: if dt != result_type: # we don't want to work with same types as we # don't do quantized here (which cannot be promoted?) result_type = _convert_data_type( str( torch.result_type( torch.zeros((), dtype=dtype_map[result_type]), torch.zeros((), dtype=dtype_map[dt]), ) ) ) else: result_type = "bool" # this is the smallest type... for inp in non_tensor_inputs: result_type = _convert_data_type( str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp)) ) return result_type # Helper functions for operator implementation def _convert_dtype_value(val): """converts a PyTorch the PyTorch numeric type id to a torch scalar type.""" convert_torch_dtype_map = { 7: "torch.float64", 6: "torch.float32", 5: "torch.float16", 4: "torch.int64", 3: "torch.int32", 2: "torch.int16", 1: "torch.int8", 0: "torch.unit8", None: "torch.int64", } # Default is torch.int64 if val in convert_torch_dtype_map: return _convert_data_type(convert_torch_dtype_map[val]) else: msg = "Torch data type value %d is not handled yet." % (val) raise NotImplementedError(msg) def _convert_data_type(input_type, default_dtype=None): """converts the PyTorch scalar type input_type to a TVM dtype. 
optionally, default_dtype can be a TVM dtype that is used if input_type is None (but not when it is unknown)""" if input_type is None and default_dtype is not None: return default_dtype input_type = input_type.lower() if input_type in ["double", "float64", "torch.float64"]: return "float64" elif input_type in ["float", "float32", "torch.float32"]: return "float32" elif input_type in ["half", "float16", "torch.float16"]: return "float16" elif input_type in ["long", "int64", "torch.int64"]: return "int64" elif input_type in ["int", "int32", "torch.int32"]: return "int32" elif input_type in ["short", "int16", "torch.int16"]: return "int16" elif input_type in ["char", "int8", "torch.int8"]: return "int8" elif input_type in ["byte", "uint8", "torch.uint8"]: return "uint8" elif input_type in ["quint8", "torch.quint8"]: return "quint8" elif input_type in ["qint8", "torch.qint8"]: return "qint8" elif input_type in ["qint32", "torch.qint32"]: return "qint32" elif input_type in ["bool", "torch.bool"]: return "bool" elif input_type in ["str"]: return "str" else: raise NotImplementedError("input_type {} is not handled yet".format(input_type)) return "float32" # Never reached def _create_typed_const(data, dtype): """create a (scalar) constant of given value and dtype. dtype should be a TVM dtype""" if dtype == "float64": typed_data = _expr.const(np.float64(data), dtype=dtype) elif dtype == "float32": typed_data = _expr.const(np.float32(data), dtype=dtype) elif dtype == "float16": typed_data = _expr.const(np.float16(data), dtype=dtype) elif dtype == "int64": typed_data = _expr.const(np.int64(data), dtype=dtype) elif dtype == "int32": typed_data = _expr.const(np.int32(data), dtype=dtype) elif dtype == "int16": typed_data = _expr.const(np.int16(data), dtype=dtype) elif dtype == "int8": typed_data = _expr.const(np.int8(data), dtype=dtype) elif dtype == "uint8": typed_data = _expr.const(np.uint8(data), dtype=dtype) else: raise NotImplementedError("input_type {} is not handled yet".format(dtype)) return typed_data def _wrap_const(c): if not isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)): return _expr.const(c) return c def _run_jit_passes(graph): """The inline pass is necessary to unwrap prim::CallMethod""" # pylint: disable=c-extension-no-member import torch if is_version_greater_than("1.5.1"): # This is required for torchvision detection models from 1.6 above # It is the same as _jit_pass_inline, except that it has some special # case behaviors for some ops such as aten::__interpolate() torch._C._jit_pass_onnx_function_substitution(graph) else: torch._C._jit_pass_inline(graph) def _get_tensor_and_var(torch_tensor, name): tensor = tvm.nd.array(torch_tensor.cpu().numpy()) var = _expr.var(name, shape=tensor.shape, dtype=tensor.dtype) return tensor, var def _get_output_name(node): assert node.outputsSize() == 1 return node.output().debugName() def _get_output_names(node): return [output.debugName() for output in node.outputs()] def _get_input_names(node_or_graph): return [inp.debugName() for inp in node_or_graph.inputs()] def _get_op_inputs(op_node, outputs): return [outputs[name] for name in _get_input_names(op_node)] def _get_node_type(node): assert node.outputsSize() == 1 return node.output().type().kind() def _get_uses(node): uses = [] for output in node.outputs(): uses += output.uses() return uses def _get_users(node): return [use.user for use in _get_uses(node)] def _getattr_attr_name(node): attribute_names = node.attributeNames() assert len(attribute_names) == 1 attr_name = 
node.s(attribute_names[0]) return attr_name def _getattr_full_name(getattrs, sep="."): return sep.join([_getattr_attr_name(node) for node in getattrs]) def _get_pytorch_value_type(typ, default_dtype="float32"): kind = typ.kind() if kind == "TensorType": if typ.scalarType() is None: # Tensor's type can be unknown if we use torch.jit.script(...) # Defaults can be passed in, if not it is float32 logger.warning("Untyped Tensor found, assume it is %s", default_dtype) return default_dtype else: return _convert_data_type(typ.scalarType()) elif kind == "ListType": return "ListType" elif kind in ["IntType", "FloatType", "BoolType", "StringType", "OptionalType"]: pt_dtype = str(typ).lower() dtype = pt_dtype if pt_dtype == "OptionalType" else _convert_data_type(pt_dtype) return dtype else: return "UnsupportedType" def _get_input_types(op_node, outputs, default_dtype="float32"): """Returns a TVM dtype for each input nodes derived from the torch type""" in_types = [] for inp in op_node.inputs(): if inp.node().kind() == "prim::GetAttr": # GetAttr nodes always return None when we call scalarType() on it name = inp.debugName() assert name in outputs if isinstance(outputs[name], _expr.Var): in_types.append(outputs[name].type_annotation.dtype) else: # For quantized modules with parameters, here we would get # "prim::GetAttr[name="_packed_params"]". Since the dtype corresponding to # _packed_params is not needed by quantized ops, we return an arbitrary type. in_types.append(default_dtype) else: in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype)) return in_types def _get_constant(node): """Retrieve a constant associated with this prim::Constant node""" attribute_names = node.attributeNames() num_attributes = len(attribute_names) if num_attributes == 1: attr_name = attribute_names[0] ty = node.output().type().kind() if ty == "IntType": return node.i(attr_name) elif ty == "BoolType": return bool(node.i(attr_name)) elif ty in ["FloatType", "LongType"]: return node.f(attr_name) elif ty in ["TensorType", "CompleteTensorType"]: tensor = node.t(attr_name) if tensor.is_cuda: tensor = tensor.cpu() if len(tensor.shape) == 0: # tensor(0.1) # TODO(t-vi): When is this needed? 
return tensor.item() return _wrap_const(tensor.numpy()) elif ty in ["DeviceObjType", "StringType"]: return node.s(attr_name) elif ty == "FunctionType": return None else: raise NotImplementedError("Unsupported type: %s" % ty) else: assert num_attributes == 0 return None def _get_operator_nodes(nodes): """Returns torch IR nodes that need conversion to Relay""" ops = [] # Traverse nodes and add to graph for node in nodes: if node.outputsSize() == 0: continue if node.outputsSize() > 1: node_name = "_".join(_get_output_names(node)) else: node_name = _get_output_name(node) if node.kind() != "prim::GetAttr": ops.append((node_name, node)) return ops def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"): """ Return Relay vars from input shapes and create entries based on expected graph inputs - to allow translation """ graph_inputs = list(graph.inputs()) if is_module: # a module has "self" as first input, which we do not need/want graph_inputs = graph_inputs[1:] if not isinstance(input_infos, list): msg = "Graph inputs input_infos should be a list" raise RuntimeError(msg) if len(graph_inputs) != len(input_infos): msg = "PyTorch has {} inputs and input_infos lists {}.".format( len(graph_inputs), len(input_infos) ) raise RuntimeError(msg) def get_relay_ty(ishape, itype, pt_type): if pt_type.kind() == "TensorType": if not (_is_int_seq(ishape) or len(ishape) == 0): msg = "Shape for Tensors must be lists of ints" raise RuntimeError(msg) if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or ( pt_type.sizes() is not None and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)]) ): msg = "Shapes of input list and information in the graph do not match" raise RuntimeError(msg) pt_dtype = pt_type.scalarType() if not pt_dtype and itype: pt_dtype = itype dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype) return TensorType(ishape, dtype) elif pt_type.kind() == "TupleType": if not isinstance(ishape, tuple): msg = "Shapes for tuples must be tuples" raise RuntimeError(msg) return TupleType( [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())] ) elif pt_type.kind() == "ListType": if not isinstance(ishape, list): msg = "Shapes for lists must be lists" raise RuntimeError(msg) pt_elemtype = pt_type.getElementType() elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape] if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)): msg = "List elements need have identical types" raise RuntimeError(msg) rlist, _, _ = prelude.mod.get_type("List") return rlist(elem_tys[0]) elif pt_type.kind() == "OptionalType": # we do not support None yet, so we fill in the type return get_relay_ty(ishape, itype, pt_type.getElementType()) # TODO: scalar inputs raise NotImplementedError("unsupported input type") input_vars = {} new_input_infos = [] for num, inp in enumerate(input_infos): if not isinstance(inp, tuple): msg = "Graph input {} is not a tuple".format(num) raise RuntimeError(msg) if len(inp) != 2 or not isinstance(inp[0], str): msg = ( "Graph input {} is not valid," " expected ('name', shape) or ('name', (shape, dtype))".format(inp) ) raise RuntimeError(msg) if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str): new_input_infos.append((inp[0], (inp[1], default_dtype))) else: new_input_infos.append(inp) input_types = [ (name, get_relay_ty(info[0], info[1], gi.type())) for (name, info), gi in zip(new_input_infos, graph_inputs) ] ir_inputs = [i.debugName() for i in 
graph_inputs] for ir_input, (name, itype) in zip(ir_inputs, input_types): inp = _expr.var(name, type_annotation=itype) # Translate from graph input to user input name input_vars[ir_input] = inp return input_vars def _unpack_tuple(tup): def unpack(tup, num_fields): return [_expr.TupleGetItem(tup, i) for i in range(num_fields)] if isinstance(tup, _expr.Tuple): return unpack(tup, len(tup.fields)) elif isinstance(tup.type_annotation, TupleType): return unpack(tup, len(tup.type_annotation.fields)) # shouldn't happen assert False def _get_free_vars_from_block(block): block_inp_names = _get_input_names(block) bound_names = block_inp_names free_vars = set() for node in block.nodes(): inp_names = _get_input_names(node) list_diff = [name for name in inp_names if name not in bound_names] free_vars.update(list_diff) bound_names += _get_output_names(node) return free_vars def get_use_chains(root_node, terminate=lambda _: False): """ Track a chain of users of this node forward, returning a list of chains See get_attr_chains below for its usage """ def concat_lists(lists): return itertools.chain.from_iterable(lists) def inner(current, accum): users = _get_users(current) if not users or terminate(users): return [accum] return concat_lists([inner(nxt, accum + [nxt]) for nxt in users]) return inner(root_node, [root_node]) def get_attr_chains(root_getattr_node): """Returns chains of attribute access starting from root_getattr_node For example, given attribute "block", as in "self.block" when "self" points to the top level torch.nn.Module, it returns lists of attribute "chains", e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params'] These sets of attributes form full attribute accessors. For example, "self.block.1", "self.block.2" will return the second and third submodule, and "self.block.0._packed_params" will return the parameters of the first submodule. """ def terminate(users): next_attrs = [user for user in users if user.kind() == "prim::GetAttr"] return len(next_attrs) == 0 return get_use_chains(root_getattr_node, terminate) def convert_params(graph, state_dict, use_parser_friendly_name=False): """ Return Relay vars and TVM NDArrays for input parameters A chain of prim::GetAttr nodes is processed one at a time """ getattr_nodes = graph.findAllNodes("prim::GetAttr", recurse=True) params = {} param_tensors = {} packed_param_map = {} vars_by_name = {} seen = set() attr_name_sep = "_" if use_parser_friendly_name else "." 
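# e.g. with use_parser_friendly_name=True the chain ["block", "0", "_packed_params"]
# from the get_attr_chains docstring above is flattened to "block_0__packed_params"
# rather than "block.0._packed_params", so the Relay text parser cannot mistake
# the dots for tuple-element accesses.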
for node in getattr_nodes: if _get_output_name(node) in seen: continue for getattrs in get_attr_chains(node): seen.update(map(_get_output_name, getattrs)) full_attr = _getattr_full_name(getattrs, attr_name_sep) full_attr_node_name = _get_output_name(getattrs[-1]) if full_attr.endswith("_packed_params"): # for quantized models packed_param_map[full_attr_node_name] = full_attr elif full_attr in state_dict: if full_attr in vars_by_name: var = vars_by_name[full_attr] else: torch_tensor = state_dict[full_attr] tensor, var = _get_tensor_and_var(torch_tensor, full_attr) param_tensors[full_attr] = tensor vars_by_name[full_attr] = var params[full_attr_node_name] = var return params, param_tensors, packed_param_map def get_all_op_names(graph): """Return all operator names in the input graph""" nodes = list(graph.nodes()) prim_with_blocks = ["prim::If", "prim::Loop"] for prim in prim_with_blocks: prim_nodes = graph.findAllNodes(prim, recurse=True) for prim_node in prim_nodes: for block in prim_node.blocks(): nodes += block.nodes() return set(node.kind() for node in nodes) def from_pytorch( script_module, input_infos, custom_convert_map=None, default_dtype="float32", use_parser_friendly_name=False, keep_quantized_weight=False, ): """Load PyTorch model in the form of a scripted PyTorch model and convert into relay. The companion parameters will be handled automatically. Parameters ---------- script_module : TopLevelTracedModule object TorchScripted PyTorch graph Note: We currently only support traces (ie: torch.jit.trace(model, input)) input_infos : List of tuples Can be (input name, input shape) or (input name, (input shape, input types)) Graph level input shape and type list The same input names need to be used for deployment, so choose easy to remember names (such as: input0, input1) e.g. [('input0', (1, 2)), ('input1', (3, 4))] or [('input0', ((1, 2), 'int')), ('input1', ((3, 4), 'float'))] custom_convert_map : Dictionary of str to Relay op A custom op conversion map in the same format as _convert_map above default_type : str The default dtype to use when type information is not provided by PyTorch. use_parser_friendly_name : bool When True, replace '.' with `_' in a original parameter name. The Relay text parser treats a variable name followed by a period as a tuple element access, so a variable name like "dense.weight" cannot be parsed correctly. Use this option when you want to run the AnnotateSpans pass on the imported module. keep_quantized_weight : bool Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use a PyTorch function to unpack quantized weights into float32 arrays and quantization parameters. By default, we return float32 weights and rely on the QNN lowering and the Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however, we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True, we quantize weights in the frontend using a function that is equivalent to qnn.op.quantize(...) operating on Numpy arrays. Returns ------- mod : tvm.IRModule The module that optimizations will be performed on. 
params : dict of str to tvm.runtime.NDArray Dict of converted parameters stored in tvm.runtime.ndarray format """ import torch mod = tvm.IRModule() prelude = Prelude(mod) converter = PyTorchOpConverter(prelude, default_dtype) graph = script_module.graph.copy() _run_jit_passes(graph) if custom_convert_map: converter.update_convert_map(custom_convert_map) op_names = get_all_op_names(graph) converter.report_missing_conversion(op_names) is_module = isinstance(script_module, torch.jit.ScriptModule) params = script_module.state_dict() if is_module else {} outputs = _get_relay_input_vars( graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module ) if use_parser_friendly_name: new_names = [key.replace(".", "_") for key in params.keys()] params = dict(zip(new_names, params.values())) param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name) tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()} outputs.update(param_vars) ret_name = _get_input_names(graph.return_node()) # For quantized models quantized_ops = set(["aten::quantize_per_tensor", "quantized::linear_dynamic"]) if len(quantized_ops.intersection(set(op_names))) > 0: weight_quant_params = qnn_torch.get_weight_quant_params( script_module, packed_param_map.values() ) input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph) qnn_torch.add_quant_params_to_outputs( outputs, packed_param_map, weight_quant_params, input_scales_for_bias, keep_quantized_weight, ) qnn_torch.add_quant_params(tvm_params, weight_quant_params) converter.update_convert_map(qnn_torch.convert_map) ret = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)[0] if isinstance(ret, list): # ListConstruct kept original python list. Convert to tuple. ret = _expr.Tuple(ret) # Separate data inputs and parameters to make sure data inputs come first. func_args = [] data_inputs = [] for arg in _analysis.free_vars(ret): if arg.name_hint not in tvm_params.keys(): data_inputs.append(arg) else: func_args.append(arg) func_args = data_inputs + func_args mod["main"] = tvm.relay.Function(func_args, ret) return transform.RemoveUnusedFunctions()(mod), tvm_params
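# A minimal usage sketch for the entry point above (the model, input name and
# target below are hypothetical; only the from_pytorch signature itself is
# taken from this file):
#
#     import torch
#     import tvm
#     from tvm import relay
#
#     model = torch.nn.Sequential(torch.nn.Linear(4, 2), torch.nn.ReLU()).eval()
#     scripted = torch.jit.trace(model, torch.randn(1, 4))
#     mod, params = relay.frontend.from_pytorch(scripted, [("input0", (1, 4))])
#
#     with tvm.transform.PassContext(opt_level=3):
#         lib = relay.build(mod, target="llvm", params=params)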
apache-2.0
Laurawly/tvm-1
python/tvm/relay/frontend/onnx.py
1
190047
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines # pylint: disable=import-outside-toplevel """ONNX: Open Neural Network Exchange frontend for Relay.""" import copy import warnings from typing import Optional import numpy as np import tvm from tvm import relay from tvm.ir import IRModule from tvm.topi.utils import get_const_tuple from ... import nd as _nd from .. import analysis from .. import expr as _expr from .. import function as _function from .. import loops as _loops from .. import op as _op from .. import qnn as _qnn from .. import random as _random from .. import ty as _ty from .. import vision as _vision from .common import ( autopad, AttrCvt, Renamer, ensure_scalar_shape, fold_constant, get_name, get_relay_op, gru_cell, infer_channels, infer_shape, infer_type, infer_value, lstm_cell, new_var, shape_of, try_resolve_var_to_const, unbind, ) __all__ = ["from_onnx"] # The default configurations of Relay ONNX frontend. ONNX_DEFAULT_CONFIGS = { # By default, TVM converts qualified onnx `matmul` to `transpose(weight) + nn.batch_matmul_NT`. # Change this flag to False to directly convert to `nn.batch_matmul`. # Note that `nn.batch_matmul` with format other than NT is in experimental, it may have some # performance issues. 
"use_nt_batch_matmul": True, } class onnx_input(list): """A helper extension to list that returns None for out of bound indices.""" def __getitem__(self, item): if isinstance(item, slice): if item.stop is None: stop = len(self) else: stop = item.stop indices = list(range(stop)[item]) return [self[i] for i in indices] if isinstance(item, int): return list(self)[item] if item < len(self) else None raise TypeError("list indices must be integers or slices, not %s" % type(item).__name__) def get_numpy(tensor_proto): """Grab data in TensorProto and convert to numpy array.""" try: from onnx.numpy_helper import to_array except ImportError as e: raise ImportError("Unable to import onnx which is required {}".format(e)) return to_array(tensor_proto) def get_type(elem_type): """Converts onnx integer datatype to numpy datatype""" try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError as e: raise ImportError("Unable to import onnx which is required {}".format(e)) return str(TENSOR_TYPE_TO_NP_TYPE[elem_type]) def get_info(info_proto): """Extract the shape from a ValueInfoProto.""" shape = [] shape_name = [] for dim in info_proto.type.tensor_type.shape.dim: name = dim.dim_param value = dim.dim_value if value is None or value == 0: value = _ty.Any() shape_name.append(name) else: shape_name.append(value) shape.append(value) name = info_proto.name if info_proto.type.tensor_type.elem_type: dtype = get_type(info_proto.type.tensor_type.elem_type) else: dtype = None return name, shape, dtype, shape_name def dimension_picker(prefix, suffix=""): """Check that dimensions are supported.""" def _impl(attr): kernel = attr["kernel_shape"] if len(kernel) == 1: return prefix + "1d" + suffix if len(kernel) == 2: return prefix + "2d" + suffix if len(kernel) == 3: return prefix + "3d" + suffix msg = "Only 1D, 2D, and 3D kernels are supported for operator {}." op_name = prefix + "1d/2d/3d" raise tvm.error.OpAttributeInvalid(msg.format(op_name)) return _impl def revert_caffe2_pad(pads): """Caffe2 requires two times the normal padding.""" if len(pads) == 4: pads = pads[:2] elif len(pads) == 2: pass else: raise tvm.error.OpAttributeInvalid("Number of pads must be either 2 or 4.") return pads def get_pad_pair(input1d, kernel1d, stride1d, mode): """infer pad size""" if input1d % stride1d == 0: pad = max(kernel1d - stride1d, 0) else: pad = max(kernel1d - (input1d % stride1d), 0) pad_before = pad // 2 pad_after = pad - pad_before if "LOWER" in mode: return [pad_after, pad_before] return [pad_before, pad_after] def onnx_default_layout(dims, op_name): if dims == 1: return "NCW" if dims == 2: return "NCHW" if dims == 3: return "NCDHW" msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}." raise tvm.error.OpAttributeInvalid(msg.format(op_name)) def onnx_storage_order2layout(storage_order, dims, op_name): """converter of onnx storage order parameter to tvm storage order format""" if storage_order not in (0, 1): raise tvm.error.OpAttributeInvalid("Mode of storage_order must be either 0 or 1") if dims == 1: return "NCW" if storage_order == 0 else "NWC" if dims == 2: return "NCHW" if storage_order == 0 else "NHWC" if dims == 3: return "NCDHW" if storage_order == 0 else "NDHWC" msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}." raise tvm.error.OpAttributeInvalid(msg.format(op_name)) def dimension_constraint(): def _dim_check(attrs): if len(attrs["kernel_shape"]) in [1, 2, 3]: return True return False return _dim_check, "Only 1d, 2d and 3d kernel supported." 
def get_scalar(x, params, dtype="float32"): """Helper to get a scalar value for Quantized operators.""" if isinstance(x, _expr.Var) and x.name_hint in params: return _op.const(params[x.name_hint].numpy(), dtype) rank = len(infer_shape(x)) assert rank <= 1, "scale and zero_point input must be scalars" if rank == 1: x = _op.squeeze(x, [0]) return _op.cast(x, dtype) def matmul_out_dtype(inputs, out_dtype): """Common function to handle MatMul and MatMulInteger16""" a_shape = shape_of(inputs[0]) a_rank = infer_shape(a_shape)[0] b_shape = shape_of(inputs[1]) b_rank = infer_shape(b_shape)[0] if a_rank > 2 or b_rank > 2: def flatten_to_nd(x, x_shape, nd=3): ndims = infer_shape(x_shape)[0] if ndims == nd: return x newshape = _op.concatenate( [ _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype), _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]), ], 0, ) out = _op.reshape(x, fold_constant(newshape)) return out b_type = infer_type(inputs[1]) # Convert to dense if the second matrix is 2d and non-dynamic if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type): a = flatten_to_nd(inputs[0], a_shape, 2) b = _op.transpose(inputs[1]) output = _op.nn.dense(a, b, out_dtype=out_dtype) else: # Convert a and b into 3 dimensional tensors. a = flatten_to_nd(inputs[0], a_shape, 3) b = flatten_to_nd(inputs[1], b_shape, 3) # Perform a NN batch matmul. output = _op.nn.batch_matmul(a, b, out_dtype=out_dtype, transpose_b=False) # Determine the output batch dimension. if a_rank > b_rank: out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2]) elif a_rank < b_rank: out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2]) # If its unclear how broadcasting should be applied, the output # shape is determined by choosing the maximum value from each input. else: out_batch = _op.concatenate( [ _op.maximum( _op.strided_slice(a_shape, [i], [i + 1]), _op.strided_slice(b_shape, [i], [i + 1]), ) for i in range(a_rank - 2) ], 0, ) # Reshape output to original dimensions. final_shape = _op.concatenate( [ out_batch, _op.strided_slice( a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1] ), _op.strided_slice( b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]] ), ], 0, ) return _op.reshape(output, fold_constant(final_shape)) # Otherwise a simple dense op will get the job done. input_1_t = _op.transpose(inputs[1], axes=(1, 0)) return _op.nn.dense(inputs[0], input_1_t, out_dtype=out_dtype) class OnnxOpConverter(object): """A helper class for holding onnx op converters.""" @classmethod def get_converter(cls, opset): """Get converter matches given opset. Parameters ---------- opset: int opset from model. Returns ------- converter, which should be `_impl_vx`. Number x is the biggest number smaller than or equal to opset belongs to all support versions. 
""" versions = [int(d.replace("_impl_v", "")) for d in dir(cls) if "_impl_v" in d] versions = sorted(versions + [opset]) version = versions[max([i for i, v in enumerate(versions) if v == opset]) - 1] if hasattr(cls, "_impl_v{}".format(version)): return getattr(cls, "_impl_v{}".format(version)) raise NotImplementedError( "opset version {} of {} not implemented".format(version, cls.__name__) ) class Unary(OnnxOpConverter): """A helper class for unary op converters.""" name = "" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format( cls.name, len(inputs) ) op_name = cls.name return get_relay_op(op_name)(*inputs) class Elemwise(OnnxOpConverter): """A helper class for elemwise op converters.""" name = "" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs)) op_name = cls.name conv_ops = ["conv2d", "conv2d_transpose"] if attr.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops): # TODO(zhreshold): remove hard coded infershape axis = int(attr.get("axis", 0)) inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2) return get_relay_op(op_name)(*inputs) class Pool(OnnxOpConverter): """A helper class for pool op converters.""" name = "" @classmethod def _impl_v1(cls, inputs, attr, params): attr_cvt, data = cls._run_calculation(inputs, attr, params) return attr_cvt([data], attr, params) @classmethod def _run_calculation(cls, inputs, attr, params): """Helper method to return the processed input data and AttrCvt object""" data = inputs[0] input_shape = infer_shape(data) input_dtype = infer_type(data).checked_type.dtype ndim = len(input_shape) if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): if cls.name == "avg_pool": pad_tuple = [] for axis in range(len(input_shape) - 2): axis_shape = input_shape[2 + axis] stride = attr.get("strides", [1] * ndim)[axis] kernel = attr["kernel_shape"][axis] pad = get_pad_pair(axis_shape, kernel, stride, attr["auto_pad"]) pad_tuple.append(pad) pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair]) attr["pads"] = pad_tuple else: # Warning: Pool does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import if "int" in input_dtype: pad_val = np.iinfo(np.dtype(input_dtype)).min else: pad_val = np.finfo(np.dtype(input_dtype)).min data = autopad( data, attr.get("strides", [1] * (ndim - 2)), attr["kernel_shape"], [1] * ndim, pad_value=pad_val, mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.' 
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], cls.name)) attr.pop("auto_pad") if "storage_order" in attr: attr["layout"] = onnx_storage_order2layout( attr["storage_order"], dims=(len(input_shape) - 2), op_name=cls.name ) else: attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name) return ( AttrCvt( op_name=dimension_picker(cls.name), transforms={ "kernel_shape": "pool_size", "pads": ("padding", 0), "dilations": ("dilation", 1), }, ignores=["storage_order"], custom_check=dimension_constraint(), ), data, ) class Absolute(Unary): """Operator converter for Absolute.""" name = "abs" class Add(Elemwise): """Operator converter for Add.""" name = "add" class AveragePool(Pool): """Operator converter for AveragePool.""" name = "avg_pool" class QLinearAveragePool(Pool): """Operator converter for QLinearAveragePool from Microsoft onnxruntime contrib opset.""" name = "avg_pool" @classmethod def _impl_v1(cls, inputs, attr, params): x_scale = get_scalar(inputs[1], params) x_zero_point = get_scalar(inputs[2], params, dtype="int32") y_scale = fold_constant(get_scalar(inputs[3], params)) y_zero_point = get_scalar(inputs[4], params, dtype="int32") attr_cvt, data = cls._run_calculation(inputs, attr, params) input_dtype = infer_type(data).checked_type.dtype # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32 # and then requantize afer (according to documentation below) # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearAveragePool float_node = _qnn.op.dequantize(data, x_scale, x_zero_point) out = attr_cvt([float_node], attr, params) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype) class BatchNorm(OnnxOpConverter): """Operator converter for BatchNorm.""" @classmethod def _impl_v1(cls, inputs, attr, params): # TODO(zhreshold): 'spatial' is not properly handled here. # TODO(vvchernov): 'training_mode' (onnx tag) is not correctly handled, ignore for now out = AttrCvt( op_name="batch_norm", ignores=["spatial", "is_test", "consumed_inputs", "momentum", "training_mode"], )(inputs, attr, params) # We only support test mode, so we return data, moving_mean, moving_var, # and then moving_mean and moving_var again as placeholders for # the expected "saved_mean", "saved_var". return _expr.TupleWrapper(_expr.Tuple((*out, out[1], out[2])), 5) class InstanceNorm(OnnxOpConverter): """Operator converter for BatchNorm.""" @classmethod def _impl_v1(cls, inputs, attr, params): return AttrCvt(op_name="instance_norm")(inputs, attr, params) class Conv(OnnxOpConverter): """Operator converter for Conv.""" @classmethod def _impl_v1(cls, inputs, attr, params): # Use shape of input to determine convolution type. 
data = inputs[0] kernel = inputs[1] input_shape = infer_shape(data) ndim = len(input_shape) kernel_type = infer_type(inputs[1]) kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)] if "kernel_shape" not in attr: attr["kernel_shape"] = kernel_shapes[0][2:] if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: Convolution does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import data = autopad( data, attr.get("strides", [1] * (ndim - 2)), attr["kernel_shape"], attr.get("dilations", [1] * (ndim - 2)), mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = [0 for i in range(ndim - 2)] elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) attr.pop("auto_pad") attr["channels"] = kernel_shapes[0][0] out = AttrCvt( op_name=dimension_picker("conv"), transforms={ "kernel_shape": "kernel_size", "dilations": ("dilation", 1), "pads": ("padding", 0), "group": ("groups", 1), }, custom_check=dimension_constraint(), )([data, kernel], attr, params) use_bias = len(inputs) == 3 if use_bias: out = _op.nn.bias_add(out, inputs[2]) return out class ConvTranspose(OnnxOpConverter): """Operator converter for ConvTranspose.""" @classmethod def _impl_v1(cls, inputs, attr, params): # get number of channels out_type = infer_type(inputs[1]) out_shapes = [get_const_tuple(out_type.checked_type.shape)] channels = out_shapes[0][1] attr["channels"] = channels groups = attr.get("group", 1) if "kernel_shape" not in attr: attr["kernel_shape"] = out_shapes[0][2:] attr["groups"] = groups # infer pads for auto_pad data = inputs[0] input_shape = infer_shape(data) ndim = len(input_shape) if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: Convolution does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import data = autopad( data, attr.get("strides", [1] * (ndim - 2)), attr["kernel_shape"], attr.get("dilations", [1] * (ndim - 2)), deconv=True, mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' 
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) attr.pop("auto_pad") out = AttrCvt( op_name=dimension_picker("conv", "_transpose"), transforms={ "kernel_shape": "kernel_size", "dilations": ("dilation", 1), "pads": ("padding", 0), "group": ("groups", 1), }, disables=["output_shape"], custom_check=dimension_constraint(), )([data, inputs[1]], attr, params) use_bias = len(inputs) == 3 if use_bias: out = _op.nn.bias_add(out, inputs[2]) return out @classmethod def _impl_v11(cls, inputs, attr, params): # get number of channels out_type = infer_type(inputs[1]) out_shapes = [get_const_tuple(out_type.checked_type.shape)] channels = out_shapes[0][1] attr["channels"] = channels groups = attr.get("group", 1) if "kernel_shape" not in attr: attr["kernel_shape"] = out_shapes[0][2:] attr["groups"] = groups # infer pads for auto_pad data = inputs[0] input_shape = infer_shape(data) ndim = len(input_shape) if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: Convolution does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import kernel_shape = attr["kernel_shape"] kndim = len(kernel_shape) dilations = attr.get("dilations", [1] * kndim) output_padding = attr.get("output_padding", [0] * kndim) strides = attr["strides"] total_pad = [0] * kndim for i in range(kndim): total_pad[i] = ( output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - strides[i] ) left = [p // 2 for p in total_pad] right = [total_pad[i] - left[i] for i in range(kndim)] if "LOWER" in attr["auto_pad"]: pad = left + right else: pad = right + left attr["pads"] = pad elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) attr.pop("auto_pad") out = AttrCvt( op_name=dimension_picker("conv", "_transpose"), transforms={ "kernel_shape": "kernel_size", "dilations": ("dilation", 1), "pads": ("padding", 0), "group": ("groups", 1), }, disables=["output_shape"], custom_check=dimension_constraint(), )([data, inputs[1]], attr, params) use_bias = len(inputs) == 3 if use_bias: out = _op.nn.bias_add(out, inputs[2]) return out class GlobalAveragePool(OnnxOpConverter): """Operator converter for GlobalAveragePool""" @classmethod def _impl_v1(cls, inputs, attr, params): rank = len(infer_shape(inputs[0])) if rank == 3: return _op.nn.global_avg_pool1d(inputs[0]) if rank == 4: return _op.nn.global_avg_pool2d(inputs[0]) if rank == 5: return _op.nn.global_avg_pool3d(inputs[0]) raise NotImplementedError( "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." % (rank - 2), ) class QLinearGlobalAveragePool(OnnxOpConverter): "Operator converter for QLinearGlobalAveragePool from Microsoft onnxruntime contrib opset." @classmethod def _impl_v1(cls, inputs, attr, params): rank = len(infer_shape(inputs[0])) x_scale = get_scalar(inputs[1], params) x_zero_point = get_scalar(inputs[2], params, dtype="int32") y_scale = fold_constant(get_scalar(inputs[3], params)) y_zero_point = get_scalar(inputs[4], params, dtype="int32") input_dtype = infer_type(inputs[0]).checked_type.dtype # Onnxruntime documentation does not mention that this global avg_pool should follow the # sequence dequantize -> float op -> quantize, but that is how QLinearAveragePool is done. 
# # This op also follows the same pattern since qnn op is not available right now. # TODO: Generate QNN op to perform quantized operation instead of dequant -> op -> quant x = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point) if rank == 3: out = _op.nn.global_avg_pool1d(x) elif rank == 4: out = _op.nn.global_avg_pool2d(x) elif rank == 5: out = _op.nn.global_avg_pool3d(x) else: raise NotImplementedError( "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." % (rank - 2), ) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype) class GlobalMaxPool(OnnxOpConverter): """Operator converter for GlobalMaxPool""" @classmethod def _impl_v1(cls, inputs, attr, params): rank = len(infer_shape(inputs[0])) if rank == 3: return _op.nn.global_max_pool1d(inputs[0]) if rank == 4: return _op.nn.global_max_pool2d(inputs[0]) if rank == 5: return _op.nn.global_max_pool3d(inputs[0]) raise NotImplementedError( "Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." % (rank - 2), ) class Div(Elemwise): """Operator converter for Divide.""" name = "divide" class Elu(OnnxOpConverter): """Operator converter for Elu.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = float(attr.get("alpha", 1.0)) return _expr.const(-alpha) * _op.nn.relu( _expr.const(1.0) - _op.exp(inputs[0]) ) + _op.nn.relu(inputs[0]) class Gemm(OnnxOpConverter): """Operator converter for Gemm.""" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 3 or len(inputs) == 2, "Gemm op take 2 or 3 inputs, {} given".format( len(inputs) ) input0_state = infer_type(inputs[0]) dtype = input0_state.checked_type.dtype # Y = alpha * A * B + beta * C alpha = float(attr.get("alpha", 1.0)) beta = float(attr.get("beta", 1.0)) transA = int(attr.get("transA", 0)) transB = int(attr.get("transB", 0)) # get number of channels channels = infer_channels(inputs[1], not transB) if transA: inputs[0] = _op.transpose(inputs[0], axes=(1, 0)) if not transB: inputs[1] = _op.transpose(inputs[1], axes=(1, 0)) if len(input0_state.checked_type.shape) != 2: inputs[0] = _op.nn.batch_flatten(inputs[0]) if alpha != 1.0: inputs[0] *= _expr.const(alpha, dtype=dtype) out = _op.nn.dense(inputs[0], inputs[1], units=channels) if len(inputs) == 3: out = out + _expr.const(beta, dtype=dtype) * inputs[2] return out class MatMul(OnnxOpConverter): """Operator converter for MatMul.""" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs)) # Need to check input shape as batch matmul must be supported. 
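# matmul_out_dtype (defined above) lowers a plain 2-D x 2-D matmul to
# nn.dense. For higher-rank operands it flattens the batch dimensions,
# using nn.dense when the second operand is a static 2-D tensor and
# nn.batch_matmul otherwise, then reshapes the result back to the
# broadcast batch shape.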
return matmul_out_dtype(inputs, out_dtype=infer_type(inputs[0]).checked_type.dtype) class MatMulInteger16(OnnxOpConverter): """Operator converter for MatMulInteger16 from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v10(cls, inputs, attr, params): assert len(inputs) == 2, "MatMulInteger16 op take 2 inputs, {} given".format(len(inputs)) a_dtype = infer_type(inputs[0]).checked_type.dtype b_dtype = infer_type(inputs[1]).checked_type.dtype # Check input data types assert a_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for first input" assert b_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for second input" out_dtype = "int32" if a_dtype == "uint16" and b_dtype == "uint16": out_dtype = "uint32" return matmul_out_dtype(inputs, out_dtype) class Mod(OnnxOpConverter): """Operator converter for Mod.""" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 2, "Mod op take 2 inputs, {} given".format(len(inputs)) # Note: attr['fmod'] determines whether the operator should behave like np.fmod or np.mod. # attr['fmod'] == 0 will behave as np.mod and attr['fmod'] == 1 will force fmod treatment. # The relay equivalent of np.fmod is relay.mod and np.mod is relay.floor_mod if attr.get("fmod", 0) == 0: op_name = "floor_mod" else: op_name = "mod" return AttrCvt(op_name)(inputs, {}, params) class MaxPool(Pool): """Operator converter for MaxPool""" name = "max_pool" class MaxUnpool(OnnxOpConverter): """Operator converter for MaxUnpool""" @classmethod def _impl_v11(cls, inputs, attr, params): # Unpack inputs and attributes data = inputs[0] data_type = infer_type(data).checked_type.dtype indices = inputs[1] output_shape = inputs[2] kernel_shape = attr.get("kernel_shape") pads = attr.get("pads", None) strides = attr.get("strides", [1] * len(kernel_shape)) # Compute the proper output shape before padding. multiplier = _op.concatenate( [_expr.const([1, 1], dtype="int64"), _expr.const(list(strides), dtype="int64")], axis=0 ) total_output_shape = multiplier * shape_of(data, dtype="int64") # Add extra dimensions from kernel size and stride mismatch total_output_shape += _op.concatenate( [_expr.const([0, 0], "int64"), _expr.const(list(kernel_shape), "int64")], axis=0 ) - _op.concatenate( [_expr.const([0, 0], "int64"), _expr.const(list(strides), "int64")], axis=0 ) # Compute padding amount if output shape is specified. if output_shape is not None: total_output_shape = output_shape elif pads is not None: # Get pads in the proper format for relay. pads = _op.concatenate( [_expr.const([0, 0, 0, 0], "int64"), _expr.const(list(pads), "int64")], axis=0 ) pads = _op.reshape(pads, [-1, 2]) # Compute the total padding per axis. total_pad = _op.sum(pads, axis=-1) # Reversing maxpool means that padding actually makes our output smaller. total_output_shape = total_output_shape - total_pad # Create a tensor of zeros then scatter our data through it. zeros_tensor = _op.zeros(total_output_shape, data_type) # We need to flatten all our tensors before scattering. flat_tensor = _op.scatter( _op.reshape(zeros_tensor, [-1]), _op.reshape(indices, [-1]), _op.reshape(data, [-1]), axis=0, ) # Now reshape back to prepadded shape. 
output_tensor = _op.reshape(flat_tensor, total_output_shape) return output_tensor class LpPool(OnnxOpConverter): """A helper class for lppool op converters.""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = infer_type(inputs[0]).checked_type.dtype data = inputs[0] input_shape = infer_shape(data) ndim = len(input_shape) if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: LpPool does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import data = autopad( data, attr["strides"], attr["kernel_shape"], [1] * ndim, mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.' raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], "LpPool")) attr.pop("auto_pad") if "storage_order" in attr: attr["layout"] = onnx_storage_order2layout( attr["storage_order"], dims=(len(input_shape) - 2), op_name="LpPool" ) else: attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name="LpPool") p_value = attr.get("p", 2) p = _expr.const(p_value, dtype) reci_p = _expr.const(1.0 / p_value, dtype) data = _op.power(data, p) out = AttrCvt( op_name=dimension_picker("avg_pool"), transforms={"kernel_shape": "pool_size", "pads": ("padding", 0)}, extras={"count_include_pad": True}, ignores=["p"], custom_check=dimension_constraint(), )([data], attr, params) kernels = attr["kernel_shape"] out = _op.abs(out) * _expr.const(np.prod(kernels).astype(dtype)) return _op.power(out, reci_p) class GlobalLpPool(OnnxOpConverter): """Operator converter for GlobalLpPool.""" @classmethod def _impl_v1(cls, inputs, attr, params): # TODO: GlobalLpPool does not yet support dynamic shapes in_shape = infer_shape(inputs[0]) attr["kernel_shape"] = in_shape[2:] return LpPool._impl_v1(inputs, attr, params) class Mul(Elemwise): """Operator converter for Multiply.""" name = "multiply" class Pad(OnnxOpConverter): """Operator converter for Pad.""" @classmethod def _impl_v1(cls, inputs, attr, params): pad_width = [] pads = attr.pop("paddings") dims = int(len(pads) / 2) for i in range(dims): pad_width.append((pads[i], pads[i + dims])) attr["pad_width"] = pad_width pad_mode = attr.get("mode", b"constant").decode("utf-8") if pad_mode in ["constant", "edge", "reflect"]: attr["pad_mode"] = pad_mode attr.pop("mode", None) else: raise tvm.error.OpAttributeInvalid( "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.' ) return AttrCvt( _op.nn.pad, transforms={ "value": "pad_value", }, )(inputs, attr, params) @classmethod def _impl_v2(cls, inputs, attr, params): pad_width = [] pads = attr.pop("pads") dims = int(len(pads) / 2) for i in range(dims): pad_width.append((pads[i], pads[i + dims])) attr["pad_width"] = pad_width pad_mode = attr.get("mode", b"constant").decode("utf-8") if pad_mode in ["constant", "edge", "reflect"]: attr["pad_mode"] = pad_mode attr.pop("mode", None) else: raise tvm.error.OpAttributeInvalid( "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.' 
) return AttrCvt( "pad", transforms={ "value": "pad_value", }, )(inputs, attr, params) @classmethod def _impl_v11(cls, inputs, attr, params): pads = inputs[1] if len(inputs) == 3: value = fold_constant(_op.take(inputs[2], _op.const(0))) else: value = 0.0 pad_width_expr = fold_constant(_op.transpose(_op.reshape(pads, (2, -1)))) pad_mode = attr.get("mode", b"constant").decode("utf-8") if not pad_mode in ["constant", "edge", "reflect"]: raise tvm.error.OpAttributeInvalid( "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.' ) return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode) class ParametricSoftPlus(OnnxOpConverter): """Operator converter for ParametricSoftPlus.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = _expr.const(float(attr.get("alpha", 1.0))) beta = _expr.const(float(attr.get("beta", 1.0))) return _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.0)) * alpha class Pow(OnnxOpConverter): """Operator converter for Pow.""" @classmethod def _impl_v13(cls, inputs, attr, params): x = inputs[0] y = inputs[1] x_type = infer_type(x).checked_type.dtype output_type = x_type y_type = infer_type(y).checked_type.dtype if not x_type.startswith("float"): x_type = "float32" x = _op.cast(x, x_type) if x_type != y_type: y = _op.cast(y, x_type) # TODO: come up with good default integer pow() func for common backends result = _op.power(x, y) if x_type != output_type: return _op.cast(result, output_type) return result class Prelu(OnnxOpConverter): """Operator converter for Prelu.""" @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs)) input_shape = shape_of(inputs[0]) alpha = _op.broadcast_to_like(inputs[1], inputs[0]) alpha = _op.reshape(alpha, [-1]) output = _op.nn.prelu(_op.reshape(inputs[0], [-1]), alpha, axis=0) return _op.reshape(output, input_shape) class Reciprocal(OnnxOpConverter): """Operator converter for Reciprocal.""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = infer_type(inputs[0]).checked_type.dtype return _expr.const(1.0, dtype=dtype) / inputs[0] class Flatten(OnnxOpConverter): """Operator converter for Flatten.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 1) ishape = _op.shape_of(inputs[0]) ndim = infer_shape(ishape)[0] if axis < 0: axis = axis + ndim if axis == 1: out = _op.nn.batch_flatten(inputs[0]) else: pre_shape = _op.prod(_op.strided_slice(ishape, [0], [axis], [1]), keepdims=True) post_shape = _op.prod(_op.strided_slice(ishape, [axis], [ndim], [1]), keepdims=True) newshape = _op.concatenate([pre_shape, post_shape], axis=0) out = _op.reshape(inputs[0], newshape) return out class Reshape(OnnxOpConverter): """Operator converter for Reshape.""" @classmethod def _impl_v1(cls, inputs, attr, params): return _op.reshape(inputs[0], attr["shape"]) @classmethod def _impl_v5(cls, inputs, attr, params): if get_name(inputs[1]) in params: shape = tuple(params[inputs[1].name_hint].numpy().astype("int32")) out = _op.reshape(inputs[0], shape) else: out = _op.reshape(*inputs) return out class DepthToSpace(OnnxOpConverter): """Operator converter for DepthToSpace.""" @classmethod def _impl_v11(cls, inputs, attr, params): block_size = int(attr["blocksize"]) mode = attr.get("mode", b"DCR").decode("utf-8") return _op.nn.depth_to_space(inputs[0], block_size, mode=mode) class SpaceToDepth(OnnxOpConverter): """Operator converter for SpaceToDepth.""" @classmethod def _impl_v1(cls, inputs, attr, params): block_size = 
int(attr["blocksize"]) return _op.nn.space_to_depth(inputs[0], block_size) class Concat(OnnxOpConverter): """Operator converter for Concat.""" @classmethod def _impl_v1(cls, inputs, args, params): return AttrCvt(op_name="concatenate")((inputs,), args) class Scale(OnnxOpConverter): """Operator converter for Scale.""" @classmethod def _impl_v1(cls, inputs, attr, params): scale = float(attr.get("scale", 1.0)) return inputs[0] * _expr.const(scale) class Selu(OnnxOpConverter): """Operator converter for Selu.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = float(attr.get("alpha", 1.67326319217681884765625)) gamma = float(attr.get("gamma", 1.05070102214813232421875)) return _expr.const(gamma) * ( _expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0])) + _op.nn.relu(inputs[0]) ) class ScaledTanh(OnnxOpConverter): """Operator converter for ScaledTanh.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = float(attr.get("alpha", 1.0)) beta = float(attr.get("beta", 1.0)) return _op.tanh(_expr.const(beta) * inputs[0]) * _expr.const(alpha) class Shrink(OnnxOpConverter): """Operator converter for Shrink.""" @classmethod def _impl_v9(cls, inputs, attr, params): x = inputs[0] dtype = infer_type(x).checked_type.dtype lambd = _op.const(attr.get("lambd", 0.5), dtype=dtype) bias = _op.const(attr.get("bias", 0.0), dtype=dtype) zeros = _op.zeros_like(x) return _op.where(x < -lambd, x + bias, zeros) + _op.where(x > lambd, x - bias, zeros) class Softsign(OnnxOpConverter): """Operator converter for Softsign.""" @classmethod def _impl_v1(cls, inputs, attr, params): return inputs[0] / (_expr.const(1.0) + Absolute.get_converter(1)(inputs, attr, params)) class Sub(Elemwise): """Operator converter for Subtract.""" name = "subtract" class Sum(OnnxOpConverter): """Operator converter for Sum.""" @classmethod def _impl_v1(cls, inputs, attr, params): # Onnx Sum Operator for in_index in range(len(inputs) - 1): inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1]) return inputs[len(inputs) - 1] class Affine(OnnxOpConverter): """Operator converter for Affine transformation.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = _expr.const(attr.get("alpha", 1.0)) beta = _expr.const(attr.get("beta", 0.0)) return (alpha * inputs[0]) + beta class ThresholdedRelu(OnnxOpConverter): """Operator converter for ThresholdedRelu.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = float(attr.get("alpha", 1.0)) alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha)) mask = _op.greater(inputs[0], alpha_tensor).astype("float32") return inputs[0] * mask def _broadcast_constraint(): def _broadcast_check(attrs): if attrs.get("axis", None): return False return True return _broadcast_check, "Specifying broadcast axis not allowed." def _fully_connected(opset): def _impl(inputs, attr, params): # get number of channels channels = infer_channels(inputs[1], params) attr["units"] = channels return AttrCvt("dense", ignores=["axis", "axis_w"])(inputs, attr) return _impl class Upsample(OnnxOpConverter): """Operator converter for Upsample (nearest mode).""" @classmethod def _impl_v9(cls, inputs, attr, params): scales = attr.get("scales") input_shape = infer_shape(inputs[0]) dims = len(input_shape) if not scales: # Here we are going to higher OPSET version. 
assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs)) if get_name(inputs[1]) in params: scales = params[inputs[1].name_hint].numpy() else: scales = inputs[1] if isinstance(scales, _expr.Constant): scales = list(scales.data.numpy()) if not isinstance(scales, _expr.Expr): assert scales[0] == 1.0 and scales[1] == 1.0 mode = attr.get("mode") if mode == b"nearest": method = "nearest_neighbor" elif mode == b"linear": method = "trilinear" if dims == 5 else "bilinear" else: raise tvm.error.OpAttributeInvalid( 'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode) ) # in 3d case, we use the purely static op if dims == 5: if isinstance(scales, _expr.Expr): scale_h = _op.take(scales, _op.const(3)) scale_w = _op.take(scales, _op.const(4)) scale_d = _op.take(scales, _op.const(1)) else: assert len(scales) == 5 scale_h = scales[-2] scale_w = scales[-1] scale_d = scales[-3] layout = "NCDHW" out = _op.nn.upsampling3d( inputs[0], scale_d, scale_h, scale_w, layout=layout, method=method, coordinate_transformation_mode="asymmetric", ) # in 2d case, use dynamic op else: if isinstance(scales, _expr.Expr): scale_h = _op.take(scales, _op.const(3)) scale_w = _op.take(scales, _op.const(4)) else: assert len(scales) == 4 scale_h = scales[-2] scale_w = scales[-1] layout = "NCHW" out = _op.nn.upsampling( inputs[0], scale_h, scale_w, layout=layout, method=method, align_corners=False, ) return out class Shape(OnnxOpConverter): """Operator converter for Shape.""" @classmethod def _impl_v1(cls, inputs, attr, params): return shape_of(inputs[0], "int64") class CumSum(OnnxOpConverter): """Operator converter for CumSum.""" @classmethod def _impl_v1(cls, inputs, attr, params): data = inputs[0] dim = inputs[1] if dim is not None: dim = int(infer_value(dim, params).numpy()) exclusive = attr.get("exclusive", 0) reverse = attr.get("reverse", 0) if reverse != 0: out = _op.reverse(data, axis=dim) out = _op.cumsum(out, axis=dim, exclusive=exclusive) return _op.reverse(out, axis=dim) return _op.cumsum(data, axis=dim, exclusive=exclusive) class Cast(OnnxOpConverter): """Operator converter for Cast.""" @classmethod def _impl_v1(cls, inputs, attr, params): return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr) @classmethod def _impl_v5(cls, inputs, attr, params): try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE attr["to"] = str(TENSOR_TYPE_TO_NP_TYPE[attr["to"]]) except ImportError as e: raise ImportError("Unable to import onnx.mapping which is required {}".format(e)) return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr) class Unsqueeze(OnnxOpConverter): """Operator converter for Unsqueeze.""" @classmethod def run_calculation(cls, tensor, axes): axes = sorted(axes) for axis in axes: tensor = _op.expand_dims(tensor, axis=axis, num_newaxis=1) return tensor @classmethod def _impl_v1(cls, inputs, attr, params): return cls.run_calculation(inputs[0], attr["axes"]) @classmethod def _impl_v13(cls, inputs, attr, params): if isinstance(inputs[1], _expr.Constant): constant_axes = list(inputs[1].data.numpy()) constant_axes = list(map(int, constant_axes)) return cls.run_calculation(inputs[0], constant_axes) rank_input = len(infer_type(inputs[0]).checked_type.shape) num_new_axis = int(infer_type(inputs[1]).checked_type.shape[0]) axes = relay.split(inputs[1], num_new_axis).astuple() result = inputs[0] # TODO (AndrewZhaoLuo): investigate performance issues with consecutive # dynamic expand_dims on non-llvm targets. 
for i in range(num_new_axis): axis = relay.TupleGetItem(axes, i) # Unpack scalar axis = relay.reshape(axis, []) axis = relay.where( axis >= relay.const(0, "int64"), axis, axis + relay.const(rank_input, "int64") ) result = _op.expand_dims(result, axis) return result class Squeeze(OnnxOpConverter): """Operator converter for Squeeze.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axes", None) return _op.squeeze(inputs[0], axis) @classmethod def _impl_v13(cls, inputs, attr, params): axis = inputs[1] dtype = infer_type(axis).checked_type.dtype if isinstance(axis, _expr.Constant): constant_axes = list(inputs[1].data.numpy()) constant_axes = list(map(int, constant_axes)) return _op.squeeze(inputs[0], constant_axes) rank = _op.shape_of(_op.shape_of(inputs[0], dtype), dtype) axis = _op.where(axis < _op.const(0, dtype), axis + rank, axis) return _op.squeeze(inputs[0], fold_constant(axis)) class Split(OnnxOpConverter): """Operator converter for Split.""" @classmethod def _impl_v1(cls, inputs, attr, params): splits = attr.get("split", None) if splits is not None and len(splits) > 1: indices = [] index = 0 for i in splits[:-1]: index += i indices.append(index) # When splits isnt specified divide evenly over axis. else: indices = attr["tvm_custom"]["num_outputs"] output = _op.split(inputs[0], indices, attr.get("axis", 0)) # If the output of split is a single value, unpack if from the TupleWrapper if len(output) == 1: output = output[0] return output @classmethod def _impl_v13(cls, inputs, attr, params): splits = inputs[1] splits_rank = None if splits is not None: splits_rank = len(infer_shape(splits)) if splits is not None and splits_rank > 0: if isinstance(splits, _expr.Constant): splits = splits.data.asnumpy() indices = [] index = 0 for i in splits[:-1]: index += i indices.append(index) else: raise ValueError("Dynamic Split not yet supported") # When splits isnt specified divide evenly over axis. else: indices = attr["tvm_custom"]["num_outputs"] output = _op.split(inputs[0], indices, attr.get("axis", 0)) # If the output of split is a single value, unpack if from the TupleWrapper if len(output) == 1: output = output[0] return output class Slice(OnnxOpConverter): """Operator converter for Slice.""" @classmethod def _common(cls, starts, ends, axes): N = max(axes) + 1 new_axes = list(range(N)) new_starts = [0] * N new_ends = [np.iinfo(np.int32).max] * N for i, axis in enumerate(axes): new_starts[axis] = starts[i] new_ends[axis] = ends[i] return new_starts, new_ends, new_axes @classmethod def _impl_v1(cls, inputs, attr, params): if isinstance(attr["starts"], int): attr["starts"] = (attr["starts"],) attr["ends"] = (attr["ends"],) try: # Update the starts and ends according to axes if required. 
if isinstance(attr["axes"], int): attr["axes"] = (attr["axes"],) new_starts, new_ends, new_axes = cls._common(attr["starts"], attr["ends"], attr["axes"]) attr["axes"] = new_axes attr["starts"] = new_starts attr["ends"] = new_ends except KeyError: pass begin = list(attr["starts"]) end = list(attr["ends"]) return _op.strided_slice(inputs[0], begin=begin, end=end) @classmethod def _impl_v10(cls, inputs, attr, params): starts = inputs[1] ends = inputs[2] axes = inputs[3] steps = inputs[4] ishape = infer_shape(inputs[0]) data_rank = len(ishape) if axes is not None: # Normalize for negative axes axes_dtype = infer_type(axes).checked_type.dtype axes = fold_constant( _op.where( axes < _op.const(0, axes_dtype), axes + _op.const(data_rank, axes_dtype), axes ) ) def has_static_axes(): return ( isinstance(axes, _expr.Constant) and isinstance(starts, _expr.Constant) and isinstance(ends, _expr.Constant) and (steps is None or isinstance(steps, _expr.Constant)) ) if axes is not None and has_static_axes(): axes_np = axes.data.numpy().astype("int64") begin_np = starts.data.numpy().astype("int64") end_np = ends.data.numpy().astype("int64") if steps is None: strides_np = np.ones_like(begin_np).astype("int64") else: strides_np = steps.data.numpy().astype("int64") if all([isinstance(ishape[i], int) for i in axes_np]): return _op.strided_slice( inputs[0], list(begin_np), list(end_np), list(strides_np), axes=list(axes_np) ) # Update the starts and ends according to axes if required. if axes is not None: data_shape = shape_of(inputs[0], dtype=infer_type(ends).checked_type.dtype) starts = _op.scatter( _op.const([0] * data_rank, dtype=infer_type(starts).checked_type.dtype), axes, starts, axis=0, ) ends = _op.scatter(data_shape, axes, ends, axis=0) if steps is not None: steps = _op.scatter( _op.const([1] * data_rank, dtype=infer_type(steps).checked_type.dtype), axes, steps, axis=0, ) if steps is None: steps = _op.const([1] * data_rank, dtype=infer_type(starts).checked_type.dtype) return _op.strided_slice( inputs[0], fold_constant(starts), fold_constant(ends), fold_constant(steps) ) def normalize_gather_indices(data, indices, axis): """Make sure gather indicies aren't negative""" ind_dtype = infer_type(indices).checked_type.dtype # Normalize the indices to a positive range s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis, dtype="int64")) cond = fold_constant(indices < _op.const(0, ind_dtype)) if isinstance(cond, _expr.Constant): val = cond.data.numpy() if val.size == 1: cond = val.item() if cond: indices = indices + s return indices indices = _op.where(cond, indices + s, indices) return indices class Gather(OnnxOpConverter): """Operator converter for Gather.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 0) data = inputs[0] indices = inputs[1] indices = normalize_gather_indices(data, indices, axis) return _op.take(data, indices, axis) class GatherElements(OnnxOpConverter): """Operator converter for GatherElements.""" @classmethod def _impl_v1(cls, inputs, attr, params): data = inputs[0] indices = inputs[1] axis = attr.get("axis", 0) indices = normalize_gather_indices(data, indices, axis) return _op.gather(data, axis, indices) class GatherND(OnnxOpConverter): """Operator converter for GatherND.""" @classmethod def _impl_common(cls, data, indices, batch_dims=0): indices_dims = len(infer_shape(indices)) indices_shape = infer_shape(indices) indices = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1))) index_rank = indices_shape[-1] return _op.gather_nd( data, 
indices, batch_dims=batch_dims, index_rank=index_rank, ) @classmethod def _impl_v1(cls, inputs, attr, params): return cls._impl_common(inputs[0], inputs[1]) @classmethod def _impl_v12(cls, inputs, attr, params): batch_dims = attr.get("batch_dims", 0) return cls._impl_common(inputs[0], inputs[1], batch_dims) class Compress(OnnxOpConverter): """Operator converter for compress""" @classmethod def _impl_v11(cls, inputs, attr, params): input_tensor, condition_tensor = inputs axis = attr.get("axis", None) # Change one hot tensor to indices e.g. [0, 1, 1, 0, 1] -> [1, 2, 4] condition_tensor = _op.reshape(_op.argwhere(condition_tensor), (-1,)) if axis is not None: return _op.take(input_tensor, condition_tensor, axis=axis) # if axis is None, flatten input tensor before selection input_tensor = _op.reshape(input_tensor, (-1,)) return _op.take(input_tensor, condition_tensor, axis=0) class Scatter(OnnxOpConverter): """Operator converter for Scatter.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 0) return _op.scatter(inputs[0], inputs[1], inputs[2], axis) class ScatterND(OnnxOpConverter): """Operator converter for ScatterND.""" @classmethod def _impl_v11(cls, inputs, attr, params): indices_dim = len(infer_shape(inputs[1])) axes = list(range(indices_dim)) return _op.scatter_nd( inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], "update" ) class EyeLike(OnnxOpConverter): """Operator converter for EyeLike.""" @classmethod def _impl_v9(cls, inputs, attr, params): in_checked_type = infer_type(inputs[0]).checked_type in_dtype = in_checked_type.dtype in_shape = list(get_const_tuple(in_checked_type.shape)) dtype = attr.get("dtype", None) if dtype is None: dtype = in_dtype else: dtype = get_type(dtype) zeros = _op.zeros(in_shape, dtype) dim = in_shape[0] indices = _op.arange(_op.const(0), _op.const(dim), dtype="int32") ones = _op.full(_op.const(1), (dim,), dtype=dtype) k = _op.const(attr.get("k", 0), dtype="int32") return _op.scatter_nd(zeros, _op.stack([indices, indices + k], axis=0), ones, "update") class LRN(OnnxOpConverter): """Operator converter for Local Response Normalization.""" @classmethod def _impl_v1(cls, inputs, attr, params): """LRN support only NCHW format https://github.com/onnx/onnx/blob/master/docs/Operators.md#LRN """ axis = 1 alpha = attr.get("alpha", 0.0001) beta = attr.get("beta", 0.75) bias = attr.get("bias", 1.0) nsize = attr.get("size") attr = {"size": nsize, "axis": axis, "alpha": alpha, "beta": beta, "bias": bias} return AttrCvt("lrn")(inputs, attr) class Maximum(OnnxOpConverter): """Operator converter for Maximum.""" @classmethod def _impl_v1(cls, inputs, attr, params): if len(inputs) == 1: return inputs[0] _max = inputs[0] for i in range(1, len(inputs)): _max = AttrCvt("maximum")([_max, inputs[i]], {}) return _max class Minimum(OnnxOpConverter): """Operator converter for Minimum.""" @classmethod def _impl_v1(cls, inputs, attr, params): if len(inputs) == 1: return inputs[0] _min = inputs[0] for i in range(1, len(inputs)): _min = AttrCvt("minimum")([_min, inputs[i]], {}) return _min class Mean(OnnxOpConverter): """Operator converter for Mean.""" @classmethod def _impl_v1(cls, inputs, attr, params): if len(inputs) == 1: return inputs[0] # avoid overflow concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0) return _op.mean(concat, axis=0, keepdims=False) class HardSigmoid(OnnxOpConverter): """Operator converter for HardSigmoid.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = 
attr.get("alpha", 0.2) beta = attr.get("beta", 0.5) transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta) attr = {"a_min": 0, "a_max": 1} return AttrCvt("clip")([transformX], attr) class Reduce(OnnxOpConverter): """Operator converter for reduce ops.""" name = "" @classmethod def run_calculation(cls, inputs, axis, keepdims): attr = {"axis": axis, "keepdims": keepdims} return AttrCvt(cls.name)(inputs, attr) @classmethod def _impl_v1(cls, inputs, attr, params): if "axes" in attr: axis = attr.get("axes", 0) else: axis_len = len(infer_shape(inputs[0])) axis = list(range(axis_len)) return cls.run_calculation(inputs, axis, attr.get("keepdims", True)) @classmethod def _impl_v12(cls, inputs, attr, params): if len(inputs) == 2: if isinstance(inputs[1], _expr.Constant): # Get axis and unpack scalar constant_axis = int(inputs[1].data.numpy()[0]) return cls.run_calculation([inputs[0]], constant_axis, attr.get("keepdims", True)) raise ValueError("Dynamic Reduce is not supported yet!") return cls._impl_v1(inputs, attr, params) class ReduceMax(Reduce): """Operator converter for ReduceMax.""" name = "max" class ReduceMin(Reduce): """Operator converter for ReduceMin.""" name = "min" class ReduceSum(Reduce): """Operator converter for ReduceSum.""" name = "sum" class ReduceMean(Reduce): """Operator converter for ReduceMean.""" name = "mean" class ReduceProd(Reduce): """Operator converter for ReduceProd.""" name = "prod" class ReduceLogSumExp(Reduce): """Operator converter for ReduceLogSumExp.""" name = "logsumexp" class ReduceSumSquare(OnnxOpConverter): """Operator converter for ReduceSumSquare.""" @classmethod def _impl_v1(cls, inputs, attr, params): if "axes" in attr: axis = attr.get("axes", 0) else: axis_len = len(infer_shape(inputs[0])) axis = list(range(axis_len)) attr = {"axis": axis, "keepdims": attr.get("keepdims", True)} inputs[0] = inputs[0] * inputs[0] return AttrCvt("sum")(inputs, attr) class ReduceL1(OnnxOpConverter): """Operator converter for ReduceL1.""" @classmethod def _impl_v1(cls, inputs, attr, params): if "axes" in attr: axis = attr.get("axes", 0) else: axis_len = len(infer_shape(inputs[0])) axis = list(range(axis_len)) attr = {"axis": axis, "keepdims": attr.get("keepdims", True)} inputs[0] = _op.abs(inputs[0]) return AttrCvt("sum")(inputs, attr) class ReduceL2(OnnxOpConverter): """Operator converter for ReduceL2.""" @classmethod def _impl_v1(cls, inputs, attr, params): if "axes" in attr: axis = attr.get("axes", 0) else: axis_len = len(infer_shape(inputs[0])) axis = list(range(axis_len)) attr = {"axis": axis, "keepdims": attr.get("keepdims", True)} inputs[0] = inputs[0] * inputs[0] out = AttrCvt("sum")(inputs, attr) return _op.sqrt(out) class ReduceLogSum(OnnxOpConverter): """Operator converter for ReduceLogSum.""" @classmethod def _impl_v1(cls, inputs, attr, params): if "axes" in attr: axis = attr.get("axes", 0) else: axis_len = len(infer_shape(inputs[0])) axis = list(range(axis_len)) attr = {"axis": axis, "keepdims": attr.get("keepdims", True)} out = AttrCvt("sum")(inputs, attr) return _op.log(out) class ArgMax(OnnxOpConverter): """Operator converter for ArgMax.""" @classmethod def _impl_v13(cls, inputs, attr, params): axis = attr.get("axis", 0) keepdims = attr.get("keepdims", True) select_last_index = attr.get("select_last_index", False) attr = {"axis": axis, "keepdims": keepdims, "select_last_index": select_last_index} return _op.cast(AttrCvt("argmax")(inputs, attr), "int64") class ArgMin(OnnxOpConverter): """Operator converter for ArgMin.""" @classmethod def _impl_v13(cls, 
inputs, attr, params): axis = attr.get("axis", 0) keepdims = attr.get("keepdims", True) select_last_index = attr.get("select_last_index", False) attr = {"axis": axis, "keepdims": keepdims, "select_last_index": select_last_index} return _op.cast(AttrCvt("argmin")(inputs, attr), "int64") class Softmax(OnnxOpConverter): """Operator converter for Softmax.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 1) ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim axes = list(range(axis, ndim)) x = inputs[0] m = _op.max(x, axes, keepdims=True) e = _op.exp(x - m) return e / _op.sum(e, axes, keepdims=True) @classmethod def _impl_v13(cls, inputs, attr, params): axis = attr.get("axis", -1) ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim axes = [axis] x = inputs[0] m = _op.max(x, axes, keepdims=True) e = _op.exp(x - m) return e / _op.sum(e, axes, keepdims=True) class LogSoftmax(OnnxOpConverter): """Operator converter for Softmax.""" @classmethod def run_calculation(cls, x, axes): """Run the calculation for Log Softmax calculation.""" m = _op.max(x, axes, keepdims=True) e = _op.exp(x - m) s = _op.sum(e, axes, keepdims=True) return x - m - _op.log(s) @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 1) ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim axes = list(range(axis, ndim)) return cls.run_calculation(inputs[0], axes) @classmethod def _impl_v13(cls, inputs, attr, params): axis = attr.get("axis", -1) ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim axes = [axis] return cls.run_calculation(inputs[0], axes) class Hardmax(OnnxOpConverter): """Operator converter for Hardmax.""" @classmethod def _impl_v1(cls, inputs, attr, params): axis = attr.get("axis", 1) ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim dtype = infer_type(inputs[0]).checked_type.dtype if axis == 0: pre = _op.const([1], "int64") else: pre = _op.prod( _op.strided_slice(shape_of(inputs[0]), [0], [axis], [1]), axis=0, keepdims=True ) post = _op.prod( _op.strided_slice(shape_of(inputs[0]), [axis], [2147483647], [1]), axis=0, keepdims=True ) newshape = _op.concatenate([pre, post], axis=0) x = _op.reshape(inputs[0], fold_constant(newshape)) argmax = _op.argmax(x, axis=1) onehot = _op.one_hot( argmax, _op.const(1.0, dtype), _op.const(0.0, dtype), fold_constant(_op.take(shape_of(x), _op.const([1], "int64"))), 1, dtype, ) return _op.reshape(onehot, shape_of(inputs[0])) @classmethod def _impl_v13(cls, inputs, attr, params) -> relay.Expr: inferred_type = infer_type(inputs[0]) dtype = inferred_type.checked_type.dtype ndim = len(inferred_type.checked_type.shape) axis = attr.get("axis", -1) % ndim argmax = _op.argmax(inputs[0], axis=axis) return _op.one_hot( argmax, _op.const(1.0, dtype), _op.const(0.0, dtype), fold_constant(_op.take(shape_of(inputs[0]), _op.const([axis], "int64"))), axis, dtype, ) class OneHot(OnnxOpConverter): """Operator converter for OneHot.""" @classmethod def _impl_v9(cls, inputs, attr, params): # Extract relay one_hot inputs. indices, depth, values = inputs ndim = len(infer_shape(indices)) # Split onnx on off values into two separate expressions. off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1)) # Extract the datatype of the output from on_value. 
dtype = infer_type(on_value).checked_type.dtype ind_dtype = infer_type(indices).checked_type.dtype # Normalize the indices to a positive range indices = _op.where( indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices ) # set default value when axis is not set in the model if "axis" not in attr: attr["axis"] = -1 axis = attr["axis"] if axis < 0: axis += ndim + 1 return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype) class ConstantOfShape(OnnxOpConverter): """Operator converter for ConstantOfShape.""" @classmethod def _impl_v9(cls, inputs, attr, params): if "value" in attr: np_value = get_numpy(attr.pop("value"))[0] value = _expr.const(np_value) dtype = np_value.dtype.name else: value = _expr.const(0) dtype = "float32" output = _op.full(value, inputs[0], dtype=dtype) return output class Constant(OnnxOpConverter): """Operator converter for ConstantOfShape.""" @classmethod def _impl_v9(cls, inputs, attr, params): if "value" not in attr: raise tvm.errors.OpAttributeRequired("no value in Constant") value = attr.pop("value") # Constants may rarely have string types. These are likely exported # from other frameworks and not actually used in TVM. We'll just use # a zero valued constant for compatibility. if isinstance(value, bytes): np_value = np.asarray([0]).astype("int64") else: np_value = get_numpy(value) dtype = np_value.dtype.name value = _expr.const(np_value, dtype) return value class Sign(OnnxOpConverter): """Operator converter for Sign.""" @classmethod def _impl_v1(cls, inputs, attr, params): return _op.sign(inputs[0]) class Equal(Elemwise): """Operator converter for Equal.""" name = "equal" class Not(Elemwise): """Operator converter for Not.""" @classmethod def _impl_v1(cls, inputs, attr, params): return _op.logical_not(inputs[0]) class And(Elemwise): """Operator converter for And.""" @classmethod def _impl_v1(cls, inputs, attr, params): return _op.logical_and(inputs[0], inputs[1]) class Tile(Elemwise): """Operator converter for Tile""" @classmethod def _impl_v6(cls, inputs, attr, params): return _op.tile(inputs[0], inputs[1]) class Erf(OnnxOpConverter): """Operator converter for Erf""" @classmethod def _impl_v1(cls, inputs, attr, params): return _op.erf(inputs[0]) class Where(OnnxOpConverter): """Operator converter for Where""" @classmethod def _impl_v9(cls, inputs, attr, params): condition_rank = len(infer_shape(inputs[0])) x_rank = len(infer_shape(inputs[1])) y_rank = len(infer_shape(inputs[2])) ranks = [condition_rank, x_rank, y_rank] # If one rank is longer than others, then we can broadcast # to that shape. max_rank = max(ranks) max_rank_idxs = [i for i, x in enumerate(ranks) if x == max_rank] broadcast_shape = shape_of(inputs[max_rank_idxs[0]]) # If two or more inputs have the same rank, compute the broadcast # shape by taking the maximum value of each dimensions. 
        if len(max_rank_idxs) > 1:
            for idx in max_rank_idxs:
                broadcast_shape = _op.maximum(broadcast_shape, shape_of(inputs[idx]))

        broadcast_shape = fold_constant(broadcast_shape)

        condition = _op.broadcast_to(inputs[0], broadcast_shape)
        x = _op.broadcast_to(inputs[1], broadcast_shape)
        y = _op.broadcast_to(inputs[2], broadcast_shape)
        return _op.where(condition, x, y)


class Or(Elemwise):
    """Operator converter for Or."""

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        return _op.logical_or(inputs[0], inputs[1])


class Expand(OnnxOpConverter):
    """Operator converter for Expand."""

    @classmethod
    def _impl_v8(cls, inputs, attr, params):
        dtype = infer_type(inputs[1]).checked_type.dtype
        in_shape = shape_of(inputs[0], dtype=dtype)
        shape = inputs[1]

        # Currently 'op.broadcast_to' expects the rank of the given 'shape'
        # (the 2nd input) to be always higher than that of the given 'input' (the 1st
        # input). However, ONNX Expand supports multi-directional broadcasting, which
        # allows the above pattern but also lets an extent of 'shape' be smaller than
        # the corresponding extent of 'input'; in that case the extent of 'shape' must
        # be 1.
        # https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
        # In these cases we cannot directly apply 'op.broadcast_to' instead of 'expand',
        # so we solve the problem by expanding the given 'shape' itself.
        def expand_shape(in_shape, shape):
            """Expand the shape when its rank is lower than that of the given input,
            and replace an extent of the shape with the corresponding extent of the
            input when it is 1.
            """
            in_dims = infer_shape(in_shape)[0]
            new_dims = infer_shape(shape)[0]
            if in_dims < new_dims:
                in_shape = _op.concatenate(
                    [_expr.const([1] * (new_dims - in_dims), dtype=dtype), in_shape],
                    axis=0,
                )
            elif new_dims < in_dims:
                shape = _op.concatenate(
                    [_expr.const([1] * (in_dims - new_dims), dtype=dtype), shape],
                    axis=0,
                )
            new_shape = _op.maximum(in_shape, shape)
            return new_shape

        shape = fold_constant(expand_shape(in_shape, shape))
        return _op.broadcast_to(inputs[0], shape=shape)


class RNN(OnnxOpConverter):
    """Operator converter for RNNs such as LSTM and GRU."""

    @classmethod
    def _activation_helper(cls, activation, alpha, beta):
        convert_map = _get_convert_map(1)
        attrs = {}
        if alpha is not None:
            attrs["alpha"] = alpha
        if beta is not None:
            attrs["beta"] = beta
        return lambda x: convert_map[activation.decode("utf-8")]([x], attrs, {})

    @classmethod
    def _activation_needs_alpha(cls, activation):
        needs_alpha = [
            "Affine",
            "LeakyRelu",
            "ThresholdedRelu",
            "ScaledTanh",
            "HardSigmoid",
            "Elu",
        ]
        return activation.decode("utf-8") in needs_alpha

    @classmethod
    def _activation_needs_beta(cls, activation):
        needs_beta = [
            "Affine",
            "ScaledTanh",
            "HardSigmoid",
        ]
        return activation.decode("utf-8") in needs_beta


class LSTM(RNN):
    """Operator converter for LSTM"""

    @classmethod
    def bidir_lstm_cell(
        cls,
        input_seqs,
        weight_dicts,
        acts,
    ):
        """
        Bidirectional LSTM cell
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t, fw_C_t = lstm_cell(
            input_seqs,
            **weight_dicts[0],
            f_act=acts[0],
            g_act=acts[1],
            h_act=acts[2],
        )

        reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
            input_seqs,
            **weight_dicts[1],
            f_act=acts[3],
            g_act=acts[4],
            h_act=acts[5],
            backwards=True,
        )

        final_outputs = []
        for i in range(seq_len):
            final_outputs.append(
                _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
            )

        return (
            _op.stack(final_outputs, axis=0),
            _op.stack([fw_H_t, rev_H_t], axis=0),
            _op.stack([fw_C_t, rev_C_t], axis=0),
        )

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        #
Unpack inputs, note that if optional and not provided then value will be None. X = inputs[0] Wp = inputs[1] Rp = inputs[2] Bp = inputs[3] # Sequence length currently unused as it can be inferred from shapes. # sequence_lens = inputs['sequence_lens'] Hp_0 = inputs[5] Cp_0 = inputs[6] Pp = inputs[7] num_directions = infer_shape(Wp)[0] W_dtype = infer_type(Wp).checked_type.dtype if num_directions not in [1, 2]: raise ValueError("num_directions must be either 1 or 2!") X_shape = infer_shape(X) hidden_size = infer_shape(Rp)[-1] batch_size = X_shape[1] # Initialize state if not provided. # Otherwise remove bidirectional axis. if Hp_0 is None: Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype) if Cp_0 is None: Cp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype) if "activations" in attr: activations = attr["activations"] if len(activations) != 3 * num_directions: raise NotImplementedError( f"LSTM assumes 3 * num_directions activation functions are provided" ) alpha_loc = 0 alphas = attr.get("activation_alpha", []) if isinstance(alphas, float): alphas = [alphas] beta_loc = 0 betas = attr.get("activation_beta", []) if isinstance(betas, float): betas = [betas] acts = [] for i in range(3 * num_directions): alpha = None beta = None activation = activations[i] if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc: alpha = alphas[alpha_loc] alpha_loc += 1 if cls._activation_needs_beta(activation) and len(betas) > beta_loc: beta = betas[beta_loc] beta_loc += 1 acts.append(cls._activation_helper(activation, alpha, beta)) else: acts = [_op.sigmoid, _op.tanh, _op.tanh] * num_directions # TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved X_steps = unbind(X, axis=0) H_ts = _op.split(Hp_0, num_directions) C_ts = _op.split(Cp_0, num_directions) Ws = _op.split(Wp, num_directions) Rs = _op.split(Rp, num_directions) if Bp is not None: Bs = _op.split(Bp, num_directions) if Pp is not None: p_i, p_o, p_f = _op.split(Pp, 3, axis=1) p_is = _op.split(p_i, num_directions) p_fs = _op.split(p_f, num_directions) p_os = _op.split(p_o, num_directions) weights_dicts = [] for i in range(num_directions): weights_dict = {} weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0]) weights_dict["cell_state"] = _op.squeeze(C_ts[i], axis=[0]) # Weights permutation: onnx format i-o-f-c, lstm cell format i-f-c-o mati, mato, matf, matc = _op.split(_op.squeeze(Ws[i], axis=[0]), 4) weights_dict["w_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0) mati, mato, matf, matc = _op.split(_op.squeeze(Rs[i], axis=[0]), 4) weights_dict["w_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0) if Bp is not None: Bi, Bh = _op.split(Bs[i], 2, -1) mati, mato, matf, matc = _op.split(_op.squeeze(Bi, axis=[0]), 4) weights_dict["b_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0) mati, mato, matf, matc = _op.split(_op.squeeze(Bh, axis=[0]), 4) weights_dict["b_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0) if Pp is not None: weights_dict["p_i"] = _op.squeeze(p_is[i], axis=[0]) weights_dict["p_f"] = _op.squeeze(p_fs[i], axis=[0]) weights_dict["p_o"] = _op.squeeze(p_os[i], axis=[0]) weights_dicts.append(weights_dict) if num_directions == 2: output, H, C = LSTM.bidir_lstm_cell( input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts, ) else: # outputs shape = [seqs_num, (batch_size, hidden_size)] outputs, H, C = lstm_cell( input_seqs=X_steps, **weights_dicts[0], f_act=acts[0], g_act=acts[1], h_act=acts[2], ) # output shape = (seqs_num, 
            # num_directions, batch_size, hidden_size)
            output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
            H = _op.expand_dims(H, axis=0)
            C = _op.expand_dims(C, axis=0)

        return _expr.TupleWrapper(_expr.Tuple((output, H, C)), 3)


class GRU(RNN):
    """Operator converter for GRU"""

    @classmethod
    def bidir_gru_cell(
        cls,
        input_seqs,
        weight_dicts,
        acts,
    ):
        """
        Bidirectional GRU cell
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t = gru_cell(
            input_seqs,
            **weight_dicts[0],
            rz_act=acts[0],
            n_act=acts[1],
        )

        reverse_outputs, rev_H_t = gru_cell(
            input_seqs,
            **weight_dicts[1],
            rz_act=acts[2],
            n_act=acts[3],
            backwards=True,
        )

        final_outputs = []
        for i in range(seq_len):
            final_outputs.append(
                _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
            )

        return (
            _op.stack(final_outputs, axis=0),
            _op.stack([fw_H_t, rev_H_t], axis=0),
        )

    @classmethod
    def _impl_v7(cls, inputs, attr, params):
        # Unpack inputs, note that if optional and not provided then value will be None.
        X = inputs[0]
        Wp = inputs[1]
        Rp = inputs[2]
        Bp = inputs[3]
        # Sequence length currently unused as it can be inferred from shapes.
        # sequence_lens = inputs['sequence_lens']
        Hp_0 = inputs[5]
        linear_before_reset = attr.get("linear_before_reset", 0)

        num_directions = infer_shape(Wp)[0]
        W_dtype = infer_type(Wp).checked_type.dtype

        if num_directions not in [1, 2]:
            raise ValueError("num_directions must be either 1 or 2!")

        X_shape = infer_shape(X)
        hidden_size = infer_shape(Rp)[-1]
        batch_size = X_shape[1]

        if Hp_0 is None:
            Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)

        if "activations" in attr:
            activations = attr["activations"]
            if len(activations) != 2 * num_directions:
                raise NotImplementedError(
                    "GRU assumes 2 * num_directions activation functions are provided"
                )
            alpha_loc = 0
            alphas = attr.get("activation_alpha", [])
            if isinstance(alphas, float):
                alphas = [alphas]
            beta_loc = 0
            betas = attr.get("activation_beta", [])
            if isinstance(betas, float):
                betas = [betas]
            acts = []
            for i in range(2 * num_directions):
                alpha = None
                beta = None
                activation = activations[i]
                if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
                    alpha = alphas[alpha_loc]
                    alpha_loc += 1
                if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
                    beta = betas[beta_loc]
                    beta_loc += 1
                acts.append(cls._activation_helper(activation, alpha, beta))
        else:
            acts = [_op.sigmoid, _op.tanh] * 2

        # TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved
        X_steps = unbind(X, axis=0)

        H_ts = _op.split(Hp_0, num_directions)
        Ws = _op.split(Wp, num_directions)
        Rs = _op.split(Rp, num_directions)

        if Bp is not None:
            Bs = _op.split(Bp, num_directions)

        weights_dicts = []
        for i in range(num_directions):
            weights_dict = {}

            weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
            weights_dict["linear_before_reset"] = linear_before_reset

            # Weights permutation: onnx format z-r-n, gru cell format r-z-n
            matz, matr, matn = _op.split(_op.squeeze(Ws[i], axis=[0]), 3)
            weights_dict["w_inp"] = _op.concatenate([matr, matz, matn], axis=0)
            matz, matr, matn = _op.split(_op.squeeze(Rs[i], axis=[0]), 3)
            weights_dict["w_hid"] = _op.concatenate([matr, matz, matn], axis=0)

            if Bp is not None:
                Bi, Bh = _op.split(Bs[i], 2, -1)
                matz, matr, matn = _op.split(_op.squeeze(Bi, axis=[0]), 3)
                weights_dict["b_inp"] = _op.concatenate([matr, matz, matn], axis=0)
                matz, matr, matn = _op.split(_op.squeeze(Bh, axis=[0]), 3)
                weights_dict["b_hid"] = _op.concatenate([matr, matz, matn], axis=0)

            weights_dicts.append(weights_dict)

        if num_directions == 2:
            output, H =
GRU.bidir_gru_cell( input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts, ) else: # outputs shape = [seqs_num, (batch_size, hidden_size)] outputs, H = gru_cell( input_seqs=X_steps, **weights_dicts[0], rz_act=acts[0], n_act=acts[1], ) # output shape = (seqs_num, num_directions, batch_size, hidden_size) output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1) H = _op.expand_dims(H, axis=0) return _expr.TupleWrapper(_expr.Tuple((output, H)), 2) class Resize(OnnxOpConverter): """Operator converter for Resize""" @classmethod def _impl_v10(cls, inputs, attr, params): mode = attr.get("mode").decode("ascii") if mode == "nearest": method = "nearest_neighbor" elif mode == "linear": method = "linear" elif mode == "cubic": method = "cubic" else: raise tvm.error.OpAttributeInvalid( 'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode) ) scale = inputs[1] size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale ndims = len(infer_shape(inputs[0])) out = None if ndims == 3: out_size = fold_constant(_op.strided_slice(size, [2], [3])) out = _op.image.resize1d(inputs[0], out_size, None, "NCW", method, "asymmetric") elif ndims == 4: out_size = fold_constant(_op.strided_slice(size, [2], [4])) out = _op.image.resize2d(inputs[0], out_size, None, "NCHW", method, "asymmetric") elif ndims == 5: out_size = fold_constant(_op.strided_slice(size, [2], [5])) out = _op.image.resize3d(inputs[0], out_size, None, "NCDHW", method, "asymmetric") else: raise NotImplementedError("Resize only supports 3, 4, or 5 dims") return out @classmethod def _impl_v11(cls, inputs, attr, params): scale = inputs[2] scale_shape = infer_shape(scale) if len(inputs) == 4: assert ( len(scale_shape) == 0 or scale_shape[0] == 0 ), "One of scale or size should be passed, not both." size = inputs[3] else: assert len(scale_shape) != 0, "One of scale or size should be passed." size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale return cls.v11_13_common(inputs, size, attr, params) @classmethod def _impl_v13(cls, inputs, attr, params): scale = inputs[2] size = inputs[3] # Some versions of onnx exporters produce an opset 13 model with the opset 11 # resize op, handle that edge case if scale is not None and size is not None: return cls._impl_v11(inputs, attr, params) if size is not None: assert scale is None, "One of scale or size should be passed, not both." else: scale_type = infer_type(scale) scale_shape = scale_type.checked_type.shape scale_dtype = scale_type.checked_type.dtype assert len(scale_shape) != 0, "One of scale or size should be passed." size = _op.cast(shape_of(inputs[0]), scale_dtype) * scale return cls.v11_13_common(inputs, size, attr, params) @classmethod def v11_13_common(cls, inputs, size, attr, params): """ Resize v11 and Resize v13 are identical except in how they handle the passing of scale and size. 
This utility provides the implementation for both """ roi = inputs[1] if roi is not None and infer_shape(roi)[0] == 0: roi = None ndims = len(infer_shape(inputs[0])) mode = attr.get("mode").decode("ascii") if mode == "nearest": method = "nearest_neighbor" elif mode == "linear": method = "linear" elif mode == "cubic": method = "cubic" else: raise tvm.error.OpAttributeInvalid( 'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode) ) coord_trans = attr.get("coordinate_transformation_mode", b"half_pixel").decode("ascii") nearest_mode = attr.get("nearest_mode", b"round_prefer_floor").decode("ascii") alpha = attr.get("cubic_coeff_a", -0.75) exclude = attr.get("exclude_outside", 0) extrapolation_value = attr.get("extrapolation_value", 0.0) if roi is not None: roi = fold_constant( _op.concatenate( [ _op.strided_slice(roi, [2], [ndims]), _op.strided_slice(roi, [ndims + 2], [2 * ndims]), ], axis=0, ) ) out_size = fold_constant(_op.strided_slice(size, [2], [ndims])) out = None if ndims == 3: out = _op.image.resize1d( inputs[0], out_size, roi, "NCW", method, coord_trans, nearest_mode, alpha, exclude, extrapolation_value, ) elif ndims == 4: out = _op.image.resize2d( inputs[0], out_size, roi, "NCHW", method, coord_trans, nearest_mode, alpha, exclude, extrapolation_value, ) elif ndims == 5: out = _op.image.resize3d( inputs[0], out_size, roi, "NCDHW", method, coord_trans, nearest_mode, alpha, exclude, extrapolation_value, ) else: raise NotImplementedError("Resize only supports 3, 4, or 5 dims") return out class NonZero(OnnxOpConverter): """Operator converter for NonZero""" @classmethod def _impl_v9(cls, inputs, attr, params): if len(inputs) > 1: raise ValueError("Expect 1 input only") output = AttrCvt(op_name="argwhere")(inputs, attr, params) # ONNX NonZero always outputs int64 output = _op.cast(output, "int64") return _op.transpose(output, axes=(1, 0)) class ReverseSequence(OnnxOpConverter): """Operator converter for ReverseSequence""" @classmethod def _impl_v10(cls, inputs, attr, params): return _op.reverse_sequence(inputs[0], inputs[1], attr["time_axis"], attr["batch_axis"]) class TopK(OnnxOpConverter): """Operator converter for TopK""" @classmethod def _impl_v1(cls, inputs, attr, params): if len(inputs) != 2: raise ValueError("Expect 2 input only") axis = attr.get("axis", -1) largest = attr.get("largest", 1) if largest == 0: # TODO(mbrookhart): optimize this by adding a smallest attribute to topi if this # ever becomes a bottleneck ndim = len(infer_shape(inputs[0])) if axis < 0: axis += ndim sort = _op.sort(inputs[0], axis=axis) argsort = _op.argsort(inputs[0], axis=axis, dtype="int64") begin = [0] * ndim stride = [1] * ndim end = _op.concatenate( [ _op.const([np.iinfo(np.int64).max] * axis, dtype="int64"), inputs[1], _op.const([np.iinfo(np.int64).max] * (ndim - axis - 1), dtype="int64"), ], axis=0, ) return _expr.TupleWrapper( _expr.Tuple( [ _op.strided_slice(sort, begin, end, stride), _op.strided_slice(argsort, begin, end, stride), ] ), 2, ) return _op.topk(inputs[0], inputs[1], axis=axis, dtype="int64") class Range(OnnxOpConverter): """Operator converter for Range""" @classmethod def _impl_v1(cls, inputs, attr, params): if len(inputs) != 3: raise ValueError("Expect 3 input only") return _op.arange( inputs[0], inputs[1], inputs[2], dtype=infer_type(inputs[0]).checked_type.dtype ) class IsInf(OnnxOpConverter): """Operator converter for IsInf""" @classmethod def _impl_v10(cls, inputs, attr, params): detect_negative = attr.get("detect_negative", 1) detect_positive = 
attr.get("detect_positive", 1)
        dtype = infer_type(inputs[0]).checked_type.dtype
        isinf = _op.isinf(inputs[0])
        if not detect_negative:
            isinf = isinf * (inputs[0] > _op.const(0, dtype))
        if not detect_positive:
            isinf = isinf * (inputs[0] < _op.const(0, dtype))
        return isinf


class Celu(OnnxOpConverter):
    """Operator converter for Celu"""

    @classmethod
    def _impl_v12(cls, inputs, attr, params):
        x = inputs[0]
        dtype = infer_type(x).checked_type.dtype
        alpha = _op.const(attr.get("alpha", 1.0), dtype)
        zero = _op.const(0, dtype)
        one = _op.const(1, dtype)
        out = _op.maximum(zero, x) + _op.minimum(zero, alpha * (_op.exp(x / alpha) - one))
        return out


class MaxRoiPool(OnnxOpConverter):
    """Operator converter for MaxRoiPool."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        assert len(inputs) == 2, "MaxRoiPool op takes 2 inputs, {} given".format(len(inputs))

        data = inputs[0]
        rois = inputs[1]
        pooled_shape = attr.get("pooled_shape")
        spatial_scale = attr.get("spatial_scale", 1.0)

        return _vision.roi_pool(data, rois, pooled_shape, spatial_scale)


class RoiAlign(OnnxOpConverter):
    """Operator converter for RoiAlign."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if len(inputs) != 3:
            raise ValueError("Expect 3 inputs only")
        x = inputs[0]
        rois = inputs[1]
        batch_indices = inputs[2]
        mode = attr.get("mode", b"avg")
        if mode not in (b"avg", b"max"):
            raise NotImplementedError("RoiAlign in Relay only uses avg and max modes")
        output_height = attr.get("output_height", 1)
        output_width = attr.get("output_width", 1)

        sampling_ratio = attr.get("sampling_ratio", 0)
        spatial_scale = attr.get("spatial_scale", 1.0)

        batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)
        batch_indices = _op.cast(batch_indices, infer_type(rois).checked_type.dtype)
        rois = _op.concatenate([batch_indices, rois], 1)

        return _vision.roi_align(
            x, rois, [output_height, output_width], spatial_scale, sampling_ratio, mode=mode
        )


class Clip(OnnxOpConverter):
    """Operator converter for Clip."""

    @staticmethod
    def convert_attributes(inputs, attr, params):
        convert = AttrCvt("clip", transforms={"min": "a_min", "max": "a_max"})
        return convert(inputs, attr, params)

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        if "min" not in attr:
            attr["min"] = -np.inf
        if "max" not in attr:
            attr["max"] = np.inf
        return Clip.convert_attributes(inputs, attr, params)

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        if len(inputs) == 3 and isinstance(inputs[2], _expr.Constant):
            attr["max"] = inputs[2].data.numpy().item()
            inputs = inputs[0:2]
        if len(inputs) >= 2 and isinstance(inputs[1], _expr.Constant):
            attr["min"] = inputs[1].data.numpy().item()
            inputs = inputs[0:1]
        if "min" in attr and "max" in attr:
            return Clip.convert_attributes(inputs, attr, params)

        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs, input, min, max"
        result = inputs[0]
        for i, op in enumerate([_op.tensor.maximum, _op.tensor.minimum]):
            if i < len(inputs) - 1:
                if inputs[i + 1] is not None:
                    result = op(result, inputs[i + 1])
        return result


class Softplus(OnnxOpConverter):
    """Operator converter for Softplus."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        data = inputs[0]
        data_dtype = infer_type(data).checked_type.dtype
        data = _op.exp(data) + _expr.const(1, dtype=data_dtype)
        return _op.log(data)


class Loop(OnnxOpConverter):
    """Operator converter for Loop"""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        max_loop_count = inputs[0]
        cond = inputs[1]
        loop_deps = inputs[2:]
        num_deps = len(loop_deps)
        # Create a copy of the body function to prevent the original
        # from being
modified. body = copy.copy(attr["body"]) iter_dtype = infer_type(max_loop_count).checked_type.dtype # Determine what condition mode we're in. assert cond is not None or max_loop_count is not None is_for_loop = max_loop_count is not None and cond is None is_condition_for_loop = cond is not None and max_loop_count is not None # Loop inputs will be packed as # [iter_count, max_count, condition, loop_deps, scan_outputs] def cond_fn(*loop_inputs): i = loop_inputs[0] max_count = loop_inputs[1] w = loop_inputs[2] if cond is not None: out_while = _op.equal(w, _expr.const(True, "bool")) if max_loop_count is not None: out_loop = _op.less(i, max_count) if is_condition_for_loop: return _op.logical_and(out_while, out_loop) if is_for_loop: return out_loop return out_while # Get the current graph proto and create a clone for the subgraph graph_scope = GraphProto.current subgraph_scope = GraphProto( graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params ) # Load nodes from outer graph into inner graph. subgraph_scope._nodes = graph_scope._nodes.copy() # Create a list of variables for each value updated in the loop. def get_var(name, val, scan=False): checked_type = infer_type(val) if hasattr(checked_type, "type_annotation"): checked_type = checked_type.type_annotation if hasattr(checked_type, "checked_type"): checked_type = checked_type.checked_type shape = get_const_tuple(checked_type.shape) actual_shape = [] for dim in shape: if isinstance(dim, int) and dim == 0: actual_shape.append(_ty.Any()) else: actual_shape.append(dim) if scan: return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype) return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype) loop_vars = [ _expr.var(body.input[0].name, shape=(), dtype=iter_dtype), # iteration count _expr.var("max_count", shape=(), dtype=iter_dtype), # iteration count get_var(body.input[1].name, cond), # exit condition ] loop_vars += [get_var(body.input[i + 2].name, v) for i, v in enumerate(loop_deps)] loop_var_names = [v.name_hint for v in loop_vars] num_scan_outputs = len(body.output) - (1 + num_deps) # Construct variables and initial empty tensors for any scan outputs. # To do this, we'll figure out the output shapes of the body subgraph by importing # it and doing type inference. scan_output_vars = [] scan_output_init = [] if num_scan_outputs > 0: with subgraph_scope: loop_outputs = subgraph_scope.from_onnx( body, graph_scope.opset, get_output_expr=True ) loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output)) for i in range(num_scan_outputs): name, _, _, _ = get_info(body.output[i + 1 + num_deps]) output_node = infer_type(loop_outputs[i + 1 + num_deps]) shape = get_const_tuple(output_node.checked_type.shape) dtype = output_node.checked_type.dtype scan_output_vars.append( _expr.var(name, shape=([_ty.Any()] * (len(shape) + 1)), dtype=dtype) ) scan_output_init.append( _op.reshape(_expr.const(np.array([]).astype(dtype)), [0] + [1] * len(shape)) ) # Now we can remove loop iter variables from our inner loop's inputs. # This is kind of a hack since we have graph inputs that we don't # want to treat as actual inputs. while len(body.input) != 0: body.input.pop(0) # Define the loop body, in this function we need to unpack loop inputs, # convert the loop subgraph, and pack outputs for the next iteration. 
def body_fn(*loop_inputs): # Unpack inputs loop_count = loop_inputs[0] max_count = loop_inputs[1] cond = loop_inputs[2] current_vars = list(loop_inputs[3 : (3 + num_deps)]) scan_outputs = loop_inputs[(3 + num_deps) :] # Prepare body inputs by adding them to node dictionary. new_inputs = [loop_count, max_count, cond] + current_vars for i, inp in enumerate(new_inputs): subgraph_scope._nodes[loop_var_names[i]] = inp # Get the output of the current loop using the updated inputs. with subgraph_scope: loop_outputs = subgraph_scope.from_onnx( body, graph_scope.opset, get_output_expr=True ) # Unpack the body outputs and prepare variables for next iteration. new_cond = loop_outputs[0] new_loop_vars = [loop_outputs[i] for i in range(1, 1 + num_deps)] new_scan_outputs = [loop_outputs[i] for i in range(1 + num_deps, len(loop_outputs))] # Add new scan outputs to tracking combined_scan_outputs = [] for i, scan in enumerate(scan_outputs): rank = len(infer_shape(scan)) - 1 new_scan = new_scan_outputs[i] expand_scan = _op.expand_dims(new_scan, axis=0) # For non scalar outputs we need to broadcast the initial value. if rank > 0: new_scan_shape = shape_of(new_scan, dtype=iter_dtype) scan_broadcast = _op.concatenate( [_op.reshape(loop_count, [1]), new_scan_shape], axis=0 ) scan = _op.broadcast_to(scan, scan_broadcast) combined_scan = _op.concatenate([scan, expand_scan], axis=0) combined_scan_outputs.append(combined_scan) # Increment counter. if max_loop_count is not None: incr = _expr.const(1, dtype=iter_dtype) loop_count = loop_count + incr # Pack loop outputs for next iteration # [iter_count, cond, loop_deps, loop_scans] return [loop_count, max_count, new_cond] + new_loop_vars + combined_scan_outputs # Create the loop function. loop = fold_constant(_loops.while_loop(cond_fn, loop_vars + scan_output_vars, body_fn)) # Now need to run initial values through the graph. init_count = _expr.const(0, dtype=iter_dtype) loop_vals = loop(init_count, max_loop_count, cond, *loop_deps, *scan_output_init) # Extract final iteration outputs. if num_deps + num_scan_outputs == 1: outputs = _expr.TupleGetItem(loop_vals, 3) else: outputs = _expr.TupleWrapper( _expr.Tuple( [ _expr.TupleGetItem(loop_vals, i + 3) for i in range(num_deps + num_scan_outputs) ] ), num_deps + num_scan_outputs, ) # Update outer graph with constants found in the subgraph. free_vars = analysis.free_vars(loop) graph_scope._params.update(subgraph_scope._params) graph_scope._nodes.update(subgraph_scope._nodes) for var in free_vars: graph_scope._nodes.update({var.name_hint: var}) return outputs class If(OnnxOpConverter): """Operator converter for If""" @classmethod def _impl_v1(cls, inputs, attr, params): cond = inputs[0] # Convert array to bool if needed. if len(infer_shape(cond)) > 0: cond = _op.take(cond, _expr.const(0, dtype="int64")) then_branch = attr.get("then_branch", None) else_branch = attr.get("else_branch", None) assert then_branch is not None and else_branch is not None # Create graph converters for both branches. graph_scope = GraphProto.current then_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params) then_graph._nodes = graph_scope._nodes.copy() else_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params) else_graph._nodes = graph_scope._nodes.copy() # Convert each branch to a relay expression. 
with then_graph: then_expr = then_graph.from_onnx(then_branch, graph_scope.opset, get_output_expr=True) with else_graph: else_expr = else_graph.from_onnx(else_branch, graph_scope.opset, get_output_expr=True) # Add constants from both branches to parent graph. graph_scope._params.update(then_graph._params) graph_scope._nodes.update(then_graph._nodes) then_free_vars = analysis.free_vars(then_expr) for var in then_free_vars: graph_scope._nodes.update({var.name_hint: var}) graph_scope._params.update(else_graph._params) graph_scope._nodes.update(else_graph._nodes) else_free_vars = analysis.free_vars(else_expr) for var in else_free_vars: graph_scope._nodes.update({var.name_hint: var}) # Now we can construct the relay if statement and return. ret = _expr.If(cond, then_expr, else_expr) if len(then_branch.output) > 1: ret = _expr.TupleWrapper(ret, len(then_branch.output)) return ret class Scan(OnnxOpConverter): """Operator converter for Scan""" @classmethod def _impl_v8(cls, inputs, attr, params): new_inputs = inputs[1:] batch_num = infer_shape(inputs[1])[0] out = [] for i in range(batch_num): v9_inputs = [ _op.take(new_inputs[j], _expr.const(i), axis=0) for j in range(len(new_inputs)) ] results = cls._impl_v9(v9_inputs, attr, params) results = [_op.expand_dims(results[j], axis=0) for j in range(len(results))] if i == 0: out = results else: out = [_op.concatenate([out[j], results[j]], axis=0) for j in range(len(results))] out = _expr.TupleWrapper(_expr.Tuple(out), len(out)) return out @classmethod def _impl_v9(cls, inputs, attr, params): body = attr.get("body") num_scan_inputs = attr.get("num_scan_inputs") num_all_inputs = len(inputs) num_state_inputs = len(body.input) - num_scan_inputs num_state_outputs = num_state_inputs num_all_outputs = len(body.output) num_scan_outputs = num_all_outputs - num_state_outputs scan_input_axes = attr.get("scan_input_axes", [0] * num_scan_inputs) scan_input_directions = attr.get("scan_input_directions", [0] * num_scan_inputs) scan_output_axes = list(attr.get("scan_output_axes", [0] * num_scan_outputs)) scan_output_directions = attr.get("scan_output_directions", [0] * num_scan_outputs) # loop count are the same for all scan inputs, so get loop count by first input scan # strided_slice not support dynamic axes, so assume input shape are static max_loop_count = infer_shape(inputs[num_state_inputs])[scan_input_axes[0]] # Create a copy of the body function to prevent the original # from being modified. body = copy.copy(attr["body"]) # Loop inputs will be packed as # [iter_count, loop_deps, scan_outputs] def cond_fn(*loop_inputs): i = loop_inputs[0] return _op.less(i, relay.const(max_loop_count, "int32")) # Get the current graph proto and create a clone for the subgraph graph_scope = GraphProto.current subgraph_scope = GraphProto( graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params ) # Load nodes from outer graph into inner graph. subgraph_scope._nodes = graph_scope._nodes.copy() # Create a list of variables for each value updated in the loop. 
def get_var(name, val, scan=False): checked_type = infer_type(val) if hasattr(checked_type, "type_annotation"): checked_type = checked_type.type_annotation if hasattr(checked_type, "checked_type"): checked_type = checked_type.checked_type shape = get_const_tuple(checked_type.shape) actual_shape = [] for dim in shape: if isinstance(dim, int) and dim == 0: actual_shape.append(_ty.Any()) else: actual_shape.append(dim) if scan: return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype) return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype) # Construct variables and initial empty tensors for any scan outputs. # To do this, we'll figure out the output shapes of the body subgraph by importing # it and doing type inference. scan_output_vars = [] scan_output_init = [] if num_scan_outputs > 0: with subgraph_scope: loop_outputs = subgraph_scope.from_onnx( body, graph_scope.opset, get_output_expr=True ) loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output)) for i in range(num_scan_outputs): name, _, _, _ = get_info(body.output[i + num_state_outputs]) output_node = infer_type(loop_outputs[i + num_state_outputs]) shape = list(get_const_tuple(output_node.checked_type.shape)) if scan_output_axes[i] < 0: scan_output_axes[i] = len(shape) + scan_output_axes[i] + 1 shape.insert(scan_output_axes[i], max_loop_count) dtype = output_node.checked_type.dtype scan_output_vars.append(_expr.var(name, shape=shape, dtype=dtype)) scan_output_init.append(_op.zeros(shape, dtype)) # loop vars = [iter_count, scan_state, scan_out] loop_vars = [ _expr.var("iter", shape=(), dtype="int32"), # iteration count ] loop_vars += [ get_var(body.input[i].name, v) for i, v in enumerate(inputs) if i < num_state_inputs ] loop_vars += scan_output_vars body_input_var_names = ["iter"] + [body.input[i].name for i in range(len(body.input))] # # Now we can remove loop iter variables from our inner loop's inputs. # # This is kind of a hack since we have graph inputs that we don't # # want to treat as actual inputs. while len(body.input) != 0: body.input.pop(0) # Define the loop body, in this function we need to unpack loop inputs, # convert the loop subgraph, and pack outputs for the next iteration. def body_fn(*loop_inputs): # Unpack inputs loop_count = loop_inputs[0] state_vars = list(loop_inputs[1 : 1 + num_state_inputs]) scan_vars = list(loop_inputs[1 + num_state_inputs :]) # body take scan graph scan inputs as original input input_scan_exprs = [] for i in range(num_state_inputs, num_all_inputs): if scan_input_directions[i - num_state_inputs] != 0: input_scan_exprs.append( relay.take( inputs[i], relay.const(max_loop_count - 1, "int32") - loop_count, axis=scan_input_axes[i - num_state_inputs], ) ) else: input_scan_exprs.append( relay.take( inputs[i], loop_count, axis=scan_input_axes[i - num_state_inputs], ) ) # Prepare body inputs by adding them to node dictionary. body_inputs = [loop_count] + state_vars + input_scan_exprs for i, inp in enumerate(body_inputs): subgraph_scope._nodes[body_input_var_names[i]] = inp # Get the output of the current loop using the updated inputs. with subgraph_scope: loop_outputs = subgraph_scope.from_onnx( body, graph_scope.opset, get_output_expr=True ) # Unpack the body outputs and prepare variables for next iteration. 
new_state_vars = [loop_outputs[i] for i in range(num_state_outputs)] new_scan_vars = [loop_outputs[i] for i in range(num_state_outputs, num_all_outputs)] # Add new scan outputs to tracking combined_scan_outputs = [] for i in range(num_scan_outputs): if scan_output_directions[i] == 0: # append new scan output combined_scan = _op.concatenate( [scan_vars[i], _op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i])], axis=scan_output_axes[i], ) # pop head scan output combined_scan = _op.strided_slice( combined_scan, begin=[1], end=[max_loop_count + 1], strides=[1], axes=[scan_output_axes[i]], ) else: # prepend new scan output combined_scan = _op.concatenate( [_op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i]), scan_vars[i]], axis=scan_output_axes[i], ) # pop tail scan output combined_scan = _op.strided_slice( combined_scan, begin=[0], end=[max_loop_count], strides=[1], axes=[scan_output_axes[i]], ) combined_scan_outputs.append(combined_scan) incr = _expr.const(1, dtype="int32") loop_count = loop_count + incr # Pack loop outputs for next iteration # [iter_count, state_var, scan_var] return [loop_count] + new_state_vars + combined_scan_outputs # Create the loop function. loop = fold_constant(_loops.while_loop(cond_fn, loop_vars, body_fn)) # Now need to run initial values through the graph. init_count = _expr.const(0, dtype="int32") input_states = [inputs[i] for i in range(num_state_inputs)] loop_vals = loop(init_count, *input_states, *scan_output_init) outputs = _expr.TupleWrapper( _expr.Tuple([_expr.TupleGetItem(loop_vals, i + 1) for i in range(num_all_outputs)]), num_all_outputs, ) # Update outer graph with constants found in the subgraph. free_vars = analysis.free_vars(loop) graph_scope._params.update(subgraph_scope._params) graph_scope._nodes.update(subgraph_scope._nodes) for var in free_vars: graph_scope._nodes.update({var.name_hint: var}) return outputs class NonMaxSuppression(OnnxOpConverter): """Operator converter for NonMaxSuppression.""" @classmethod def _impl_v10(cls, inputs, attr, params): # Get parameter values boxes = inputs[0] scores = inputs[1] max_output_boxes_per_class = inputs[2] iou_threshold = inputs[3] score_threshold = inputs[4] boxes_dtype = infer_type(boxes).checked_type.dtype if attr.get("center_point_box", 0) != 0: xc, yc, w, h = _op.split(boxes, 4, axis=2) half_w = w / _expr.const(2.0, boxes_dtype) half_h = h / _expr.const(2.0, boxes_dtype) x1 = xc - half_w x2 = xc + half_w y1 = yc - half_h y2 = yc + half_h boxes = _op.concatenate([y1, x1, y2, x2], axis=2) if iou_threshold is None: iou_threshold = _expr.const(0.0, dtype="float32") if score_threshold is None: score_threshold = _expr.const(0.0, dtype="float32") def conditionally_squeeze_scalar(x): rank = len(infer_shape(x)) assert rank <= 1, "nms thresholds must be scalars" if rank == 1: return _op.squeeze(x, [0]) return x max_output_boxes_per_class = conditionally_squeeze_scalar(max_output_boxes_per_class) iou_threshold = conditionally_squeeze_scalar(iou_threshold) score_threshold = conditionally_squeeze_scalar(score_threshold) nms_out = _op.vision.all_class_non_max_suppression( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold ) return _op.strided_slice(nms_out[0], _op.const([0], dtype="int64"), nms_out[1]) class ATen(OnnxOpConverter): """Operator converter for Pytorch ATen ops.""" @classmethod def _op_dispatch(cls, operator, inputs, attr, params): op_map = { "size": cls._size, "arange": cls._arange, "index_put": cls._index_put, "reshape": cls._reshape, "embedding_bag": 
cls._embedding_bag, } assert operator in op_map, "Operator %s is not supported." % operator return op_map[operator](inputs, attr, params) @classmethod def _size(cls, inputs, attr, params): return _op.take( _op.shape_of(inputs[0], dtype="int64"), _expr.const(-1, dtype="int64"), axis=0, mode="wrap", ) @classmethod def _arange(cls, inputs, attr, params): return _op.arange(inputs[0], inputs[1], inputs[2], dtype="int64") @classmethod def _check_index(cls, indices, values): def unfolding_indices(indices, values): n = len(indices) flatten_indices = [] slices_size = [] for index in indices: flatten_indices.append(_op.reshape(index, _op.const([-1]))) slices_size.append(infer_shape(flatten_indices[-1])[0]) repeat_size = [1] tile_size = [1] for i in range(1, n): repeat_size.append(slices_size[-i] * repeat_size[-1]) tile_size.append(slices_size[i - 1] * tile_size[-1]) repeat_size.reverse() unflod_slices = [] for i in range(n): unflod_slices.append( fold_constant( _op.repeat(_op.tile(flatten_indices[i], (tile_size[i],)), repeat_size[i], 0) ) ) return unflod_slices, _op.reshape(values, _op.const([-1])) values_shape = infer_shape(values) if len(values_shape) != 1: return unfolding_indices(indices, values) return indices, values @classmethod def _index_put(cls, inputs, attr, params): in_tensor = inputs[0] indices, values = cls._check_index(inputs[1 : len(inputs) - 2], inputs[len(inputs) - 2]) accumulate = inputs[len(inputs) - 1].data.asnumpy() != 0 if not accumulate: mode = "update" else: mode = "add" index_tensor = _op.stack(indices, axis=0) return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode) @classmethod def _reshape(cls, inputs, attr, params): return _op.reshape(inputs[0], inputs[1]) @classmethod def _embedding_bag(cls, inputs, attr, params): mode_map = {0: _op.sum, 1: _op.mean, 2: _op.max} mode = attr.get("mode", 1) reduction_fn = mode_map[mode] weights, indices, offsets = inputs[0], inputs[1], inputs[2] offsets_shape = _op.shape_of(offsets, dtype="int64") indices_shape = _op.stack( [ _op.take(offsets_shape, _expr.const(0, dtype="int64")), _expr.const(-1, dtype="int64"), ], axis=0, ) indices = _op.reshape(indices, indices_shape) embedding = _op.take(weights, indices.astype("int64"), axis=0) rembedding = reduction_fn(embedding, axis=1) # EmbeddingBag has 4 outputs for some reason despite only one ever being used. # Fill the rest with 0s. 
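        # Note: the reshape above views `indices` as (num_bags, -1), so this lowering
        # implicitly assumes every bag has the same length; ragged bags described by
        # arbitrary offsets are not handled here.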
        unused_output = _expr.const(0, dtype="float32")
        return _expr.TupleWrapper(
            _expr.Tuple((rembedding, unused_output, unused_output, unused_output)), 4
        )

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        operator = attr.get("operator", None).decode("utf-8")
        assert operator, "ATen Operator not found"
        return cls._op_dispatch(operator, inputs, attr, params)


class QuantizeLinear(OnnxOpConverter):
    """Operator converter for QuantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        data, scale, zp = inputs
        out_dtype = infer_type(zp).checked_type.dtype
        return _qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, out_dtype)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        data, scale, zp = inputs
        out_dtype = infer_type(zp).checked_type.dtype
        axis = attr.get("axis", 1)
        if len(infer_shape(data)) < 2:
            axis = 0
        return _qnn.op.quantize(data, scale, _op.cast(zp, "int32"), axis, out_dtype)


class DequantizeLinear(OnnxOpConverter):
    """Operator converter for DequantizeLinear."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        data, scale, zp = inputs
        return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), 0)

    @classmethod
    def _impl_v13(cls, inputs, attr, params):
        data, scale, zp = inputs
        axis = attr.get("axis", 1)
        if len(infer_shape(data)) <= 1:
            axis = 0
        return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), axis)


class DynamicQuantizeLinear(OnnxOpConverter):
    """Operator converter for DynamicQuantizeLinear."""

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        """This op is deprecated and only supports uint8."""
        data = inputs[0]
        data_dtype = infer_type(data).checked_type.dtype
        zero = _op.const(0, dtype=data_dtype)
        maximum = _op.maximum(zero, _op.max(data))
        minimum = _op.minimum(zero, _op.min(data))
        scale = (maximum - minimum) / _op.const(255, dtype=data_dtype)
        zp = zero - _op.min(data) / scale
        zp = _op.cast(_op.round(_op.clip(zp, 0, 255)), "uint8")
        return _expr.TupleWrapper(
            _expr.Tuple(
                [_qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, "uint8"), scale, zp]
            ),
            size=3,
        )


class QLinearConv(OnnxOpConverter):
    """Operator converter for QLinearConv."""

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
        data = inputs[0]
        x_scale = get_scalar(inputs[1], params)
        x_zero_point = get_scalar(inputs[2], params, "int32")
        weight = inputs[3]
        w_scale = get_scalar(inputs[4], params)
        w_zero_point = get_scalar(inputs[5], params, "int32")
        y_scale = fold_constant(get_scalar(inputs[6], params))
        y_zero_point = get_scalar(inputs[7], params, "int32")

        input_shape = infer_shape(data)
        ndim = len(input_shape)
        kernel_type = infer_type(weight)
        kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
        if "kernel_shape" not in attr:
            attr["kernel_shape"] = kernel_shapes[0][2:]

        if "auto_pad" in attr:
            attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
            if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
                # Warning: Convolution does not yet support dynamic shapes,
                # one will need to run dynamic_to_static on this model after import
                data = autopad(
                    data,
                    attr.get("strides", [1] * (ndim - 2)),
                    attr["kernel_shape"],
                    attr.get("dilations", [1] * (ndim - 2)),
                    pad_value=x_zero_point.data,
                    mode=attr["auto_pad"],
                )
            elif attr["auto_pad"] == "VALID":
                attr["pads"] = tuple([0 for i in range(ndim - 2)])
            elif attr["auto_pad"] == "NOTSET":
                pass
            else:
                msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) attr.pop("auto_pad") out_channels = kernel_shapes[0][0] dilation = attr.get("dilations", [1] * (ndim - 2)) strides = attr.get("strides", [1] * (ndim - 2)) padding = attr["pads"] if "pads" in attr else 0 groups = attr["group"] if "group" in attr else 1 if ndim != 4: raise tvm.error.OpAttributeInvalid( "Only 2D kernels are supported for operator QLinearConv." ) out = _qnn.op.conv2d( data, weight, x_zero_point, w_zero_point, x_scale, w_scale, kernel_size=attr["kernel_shape"], channels=out_channels, strides=strides, padding=padding, dilation=dilation, groups=groups, ) use_bias = len(inputs) == 9 if use_bias: out = _op.nn.bias_add(out, inputs[8]) out_dtype = infer_type(inputs[7]).checked_type.dtype requantize_scale = _op.multiply(x_scale, w_scale) # requantize requires y_scale to be constant, # if y_scale is not constant, doing dequantize -> quantize if isinstance(y_scale, _expr.Constant): out = _qnn.op.requantize( out, requantize_scale, _op.const(0, dtype="int32"), y_scale, y_zero_point, out_dtype=out_dtype, axis=0, ) else: out = _qnn.op.dequantize(out, requantize_scale, _op.const(0, dtype="int32"), axis=0) out = _qnn.op.quantize(out, y_scale, y_zero_point, axis=0, out_dtype=out_dtype) return out class QLinearAdd(OnnxOpConverter): """Operator converter for QLinearAdd from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v10(cls, inputs, attr, params): a = inputs[0] a_scale = get_scalar(inputs[1], params) a_zero_point = get_scalar(inputs[2], params, "int32") b = inputs[3] b_scale = get_scalar(inputs[4], params) b_zero_point = get_scalar(inputs[5], params, "int32") c_scale = get_scalar(inputs[6], params) c_zero_point = get_scalar(inputs[7], params, "int32") dtype = infer_type(a).checked_type.dtype ## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32 ## and then requantize afer ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qladd.cpp a = _qnn.op.dequantize( inputs[0], a_scale, a_zero_point ) # , c_scale, c_zero_point, out_dtype = dtype) b = _qnn.op.dequantize( inputs[3], b_scale, b_zero_point ) # , c_scale, c_zero_point, out_dtype = dtype) out = _op.add(a, b) return _qnn.op.quantize(out, c_scale, c_zero_point, out_dtype=dtype) class QLinearMatMul(OnnxOpConverter): """ Operator converter for QLinearMatMul from Microsoft onnxruntime contrib opset. Limitations: - Only supports 2D input tensors. - Not guaranteed to meet the integer-overflow behavior stipulated in the ONNX documentation for this operator. """ @classmethod def _impl_v10(cls, inputs, attr, params): # Some of the ops used below take scalar-like inputs, and may require either # of the following: # # - the input is Const node (not merely an expression that *could* be reduced # to a single Const at graph-compilation time) # # - the input has a specific dtype # # This function attempts to present 'x' in a form that meets both of those # requirements. def try_resolve_to_const_scalar(x, dtype_override=None): x2 = try_resolve_var_to_const(x, params) x3 = ensure_scalar_shape(x2) x_dtype = infer_type(x).checked_type.dtype if (dtype_override is not None) and (dtype_override != x_dtype): x4 = _op.cast(x3, dtype_override) else: x4 = x3 x5 = fold_constant(x4) return x5 # Unpack the inputs and obtain some type info... 
a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp = inputs a_type = infer_type(a).checked_type # 'T1' in ONNX doc for this op a_scale_type = infer_type(a_scale).checked_type a_zp_type = infer_type(a_zp).checked_type b_type = infer_type(b).checked_type # 'T2' in ONNX doc for this op b_scale_type = infer_type(b_scale).checked_type b_zp_type = infer_type(b_zp).checked_type y_scale_type = infer_type(y_scale).checked_type y_zp_type = infer_type(y_zp).checked_type # 'T3' in ONNX doc for this op a_shape = infer_shape(a) b_shape = infer_shape(b) # Verify type assumptions, based on the ONNX doc for this op... assert a_type.dtype in ["int8", "uint8"] assert a_scale_type.dtype == "float32" assert a_zp_type.dtype == a_type.dtype assert b_type.dtype in ["int8", "uint8"] assert b_scale_type.dtype == "float32" assert b_zp_type.dtype == b_type.dtype assert y_scale_type.dtype == "float32" assert y_zp_type.dtype in ["int8", "uint8"] # TODO: relax this limitation in a future version of this importer. a_rank = len(a_shape) b_rank = len(b_shape) assert (a_rank == 2) and (b_rank == 2), ( "QLinearMatMul importer currently requires both 'a' and 'b' tensors to be 2D, but" " rank(a)={}, rank(b)={}".format(a_rank, b_rank) ) # _qnn.op.dense requires the zero-point values to have dtype int32. a_scale_scalar = try_resolve_to_const_scalar(a_scale) a_zp_scalar = try_resolve_to_const_scalar(a_zp, "int32") b_scale_scalar = try_resolve_to_const_scalar(b_scale) b_zp_scalar = try_resolve_to_const_scalar(b_zp, "int32") y_scale_scalar = try_resolve_to_const_scalar(y_scale) y_zp_scalar = try_resolve_to_const_scalar(y_zp, "int32") # TODO: Confirm that we're using 'num_hidden_units' correctly / as intended with # the '_qnn.op.dense' instance below. num_hidden_units = infer_shape(b)[-1] # - Specify the matmul result dtype as int32, so that hopefully the matmul will use # a 32-bit accumulator as seems to be required by the ONNX op's documentation. # # TL;DR: # The ONNX documentation for this op is clear about acceptable overflow # behavior during the matmul operation: # - The scalar multiplication ops MAY NOT overflow. # - The scalar addition ops, which sum the results of the scalar multiplication, # MAY overflow, but if they do so, it must behave as one would expect during # 32-bit integer-addition overflow. # As of this writing, Relay's qnn.op.dense operator doesn't expose a way for us to # express these constraints. # # TODO: Extend TVM / Relay / TIR / etc. to allow this kind of constraint to be # expressed in a Relay graph. And then update this importer and various TVM # backends accordingly. matmul_result_dtype = "int32" matmul_result = _qnn.op.dense( a, _op.transpose(b), a_zp_scalar, b_zp_scalar, a_scale_scalar, b_scale_scalar, num_hidden_units, matmul_result_dtype, ) # This information might only be found in the C++ code-comments for the # dense.matmul op, but the quantized tensor returned by _qnn.op.dense # has scale==(a_scale_scalar * b_scale_scalar), and zero_point==0. # # 'matmul_result_zp_scalar' has type 'int32' to satisfy input requirements # of the [de/re]quantize ops below. 
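        # Informally, the remaining steps compute:
        #   real_result ~= (a_scale * b_scale) * matmul_result
        #   y           ~= round(real_result / y_scale) + y_zp
        # either in one shot via qnn.requantize when y_scale is a constant, or via an
        # explicit dequantize -> quantize pair otherwise.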
matmul_result_scale_scalar = fold_constant(_op.multiply(a_scale_scalar, b_scale_scalar)) matmul_result_zp_scalar = _op.const(0, dtype="int32") # requantize requires y_scale to be constant, # if y_scale is not constant, doing dequantize -> quantize if isinstance(y_scale_scalar, _expr.Constant): y = _qnn.op.requantize( matmul_result, matmul_result_scale_scalar, matmul_result_zp_scalar, y_scale_scalar, y_zp_scalar, axis=-1, rounding="TONEAREST", out_dtype=y_zp_type.dtype, ) else: matmul_result_deq = _qnn.op.dequantize( matmul_result, matmul_result_scale_scalar, matmul_result_zp_scalar, axis=0 ) y = _qnn.op.quantize( matmul_result_deq, y_scale_scalar, y_zp_scalar, axis=0, out_dtype=y_zp_type.dtype ) return y class QLinearMul(OnnxOpConverter): """Operator converter for QLinearMul from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v10(cls, inputs, attr, params): a = inputs[0] a_scale = get_scalar(inputs[1], params) a_zero_point = get_scalar(inputs[2], params, "int32") b = inputs[3] b_scale = get_scalar(inputs[4], params) b_zero_point = get_scalar(inputs[5], params, "int32") y_scale = fold_constant(get_scalar(inputs[6], params)) y_zero_point = get_scalar(inputs[7], params, "int32") dtype = infer_type(a).checked_type.dtype ## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32 ## and then requantize afer ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qlmul.cpp a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point) b = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point) out = _op.multiply(a, b) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype) class QLinearLeakyRelu(OnnxOpConverter): """Operator converter for QLinearLeakyRelu from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v10(cls, inputs, attr, params): a_scale = get_scalar(inputs[1], params) a_zero_point = get_scalar(inputs[2], params, "int32") y_scale = fold_constant(get_scalar(inputs[3], params)) y_zero_point = get_scalar(inputs[4], params, "int32") alpha = float(attr.get("alpha", 1.0)) dtype = infer_type(inputs[0]).checked_type.dtype # Onnxruntime doesn't actually do this op in integer, they dequantize to fp32 # and then requantize afer (according to documentation below) # https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearLeakyRelu a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point) out = _op.nn.leaky_relu(a, alpha) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype) class QLinearSigmoid(OnnxOpConverter): """Operator converter for QLinearSigmoid from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v10(cls, inputs, attr, params): x = inputs[0] x_scale = get_scalar(inputs[1], params) x_zero_point = get_scalar(inputs[2], params, "int32") y_scale = fold_constant(get_scalar(inputs[3], params)) y_zero_point = get_scalar(inputs[4], params, "int32") dtype = infer_type(x).checked_type.dtype ## Apparently, onnxruntime doesn't do this op in integer, they dequantize to fp32 ## and then requantize after: ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/ ## providers/dml/DmlExecutionProvider/src/GraphTransformer.cpp#L245 x = _qnn.op.dequantize(x, x_scale, x_zero_point) out = _op.sigmoid(x) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype) class QLinearConcat(OnnxOpConverter): """Operator converter for QLinearConcat from Microsoft onnxruntime contrib opset.""" @classmethod def _impl_v1(cls, inputs, attr, params): # 
which axis to concat on axis = attr["axis"] y_scale = fold_constant(get_scalar(inputs[0], params)) y_zero_point = get_scalar(inputs[1], params, "int32") # input tensors, scales, zero_points assert ( len(inputs) % 3 == 2 ), "Additional input count must be a multiple of 3 -- tensor/scale/zero_point tuples" tensors = [] scales = [] zero_points = [] for i in range(2, len(inputs), 3): tensors.append(inputs[i]) scales.append(get_scalar(inputs[i + 1], params)) zero_points.append(get_scalar(inputs[i + 2], params, "int32")) return _qnn.op.concatenate(tensors, scales, zero_points, y_scale, y_zero_point, axis) class ConvInteger(OnnxOpConverter): """Operator converter for ConvInteger.""" @classmethod def _impl_v10(cls, inputs, attr, params): data = inputs[0] weight = inputs[1] data_zp = inputs[2] weight_zp = inputs[3] if data_zp is None: data_zp = _expr.const(0, "int32") if weight_zp is None: weight_zp = _expr.const(0, "int32") input_type = infer_type(data) input_shape = get_const_tuple(input_type.checked_type.shape) ndim = len(input_shape) kernel_type = infer_type(weight) kernel_shape = get_const_tuple(kernel_type.checked_type.shape) if "kernel_shape" not in attr: attr["kernel_shape"] = kernel_shape[2:] if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: Convolution does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import data = autopad( data, attr.get("strides", [1] * (ndim - 2)), attr["kernel_shape"], attr.get("dilations", [1] * (ndim - 2)), pad_value=data_zp, mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": pass else: msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) attr.pop("auto_pad") out_channels = kernel_shape[0] dilation = attr.get("dilations", [1] * (ndim - 2)) strides = attr.get("strides", [1] * (ndim - 2)) padding = attr["pads"] if "pads" in attr else 0 groups = attr["group"] if "group" in attr else 1 if ndim != 4: raise tvm.error.OpAttributeInvalid( "Only 2D kernels are supported for operator ConvInteger." 
) return _qnn.op.conv2d( data, weight, _op.cast(data_zp, "int32"), _op.cast(weight_zp, "int32"), _expr.const(1.0, "float32"), _expr.const(1.0, "float32"), kernel_size=attr["kernel_shape"], channels=out_channels, strides=strides, padding=padding, dilation=dilation, groups=groups, ) class BitShift(OnnxOpConverter): """Operator converter for NonZero""" @classmethod def _impl_v11(cls, inputs, attr, params): if len(inputs) != 2: raise ValueError("Bitshift expects 2 inputs") direction = attr.get("direction", "LEFT").decode("ascii") if direction == "LEFT": out = _op.left_shift(*inputs) elif direction == "RIGHT": out = _op.right_shift(*inputs) else: raise ValueError("Unsupported Shift Direction: " + direction) return out class Unique(OnnxOpConverter): """Operator converter for unique""" @classmethod def _impl_v11(cls, inputs, attr, params): if len(inputs) != 1: raise ValueError("Unique expects 1 input") data = inputs[0] axis = attr.get("axis", None) if axis is None: # If axis is None, flatten the input before calling unique data = _op.reshape(data, _op.const([-1])) else: data_shape = infer_shape(data) if len(data_shape) != 1: raise ValueError("TVM only supports 1D Unique operator.") is_sorted = attr.get("sorted", 1) # sorted is 0 or 1, 1 by default # ONNX documentation lists return_counts as optional but there is no input to specify # whether it is returned. Therefore we'll just always return it. unique = _op.unique(data, is_sorted=(is_sorted == 1), return_counts=True) num_unique = unique[3] trim_unique_lambda = lambda input: _op.strided_slice(input, _op.const([0]), num_unique) unique_vals = trim_unique_lambda(unique[0]) indices = _op.cast(trim_unique_lambda(unique[1]), "int64") # ONNX always returns int64 inverse_indices = _op.cast(unique[2], "int64") # ONNX always returns int64 counts = _op.cast(trim_unique_lambda(unique[4]), "int64") # ONNX always returns int64 # ONNX unique returns unique, indices, inverse_indices, (optional) counts return _expr.TupleWrapper(_expr.Tuple([unique_vals, indices, inverse_indices, counts]), 4) class Einsum(OnnxOpConverter): """Operator converter for Einsum""" @classmethod def _impl_v12(cls, inputs, attr, params): equation = attr["equation"].decode("utf-8") return _op.einsum(inputs, equation) class RandomNormal(OnnxOpConverter): """Operator converter for random_normal""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = get_type(attr.get("dtype", 1)) mean = attr.get("mean", 0.0) scale = attr.get("scale", 1.0) seed = attr.get("seed", None) shape = attr["shape"] assert dtype in [ "float32", "float64", ], "Only float random value generation is currently supported." if seed is None: seed = np.random.randint(1e6) else: seed = int(seed) key = _random.threefry_key(seed) output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale) _, vals = _expr.TupleWrapper(output, 2) return vals class RandomNormalLike(OnnxOpConverter): """Operator converter for random_normal_like""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = attr.get("dtype", None) scale = attr.get("scale", 1.0) mean = attr.get("mean", 0.0) seed = attr.get("seed", None) shape = infer_shape(inputs[0]) if dtype is None: dtype = infer_type(inputs[0]).checked_type.dtype else: dtype = get_type(dtype) assert dtype in [ "float32", "float64", ], "Only float random value generation is currently supported." 
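        # Note: when no "seed" attribute is present, a fresh seed is drawn with
        # np.random.randint, so importing the same model twice will generally yield
        # different random streams; supplying an explicit seed makes the
        # threefry-based generator deterministic.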
if seed is None: seed = np.random.randint(1e6) else: seed = int(seed) key = _random.threefry_key(seed) output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale) _, vals = _expr.TupleWrapper(output, 2) return vals class RandomUniform(OnnxOpConverter): """Operator converter for random_uniform""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = get_type(attr.get("dtype", 1)) high = attr.get("high", 1.0) low = attr.get("low", 0.0) seed = attr.get("seed", None) shape = attr["shape"] assert dtype in [ "float32", "float64", ], "Only float random value generation is currently supported." if seed is None: seed = np.random.randint(1e6) else: seed = int(seed) key = _random.threefry_key(seed) output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high) _, vals = _expr.TupleWrapper(output, 2) return vals class RandomUniformLike(OnnxOpConverter): """Operator converter for random_uniform_like""" @classmethod def _impl_v1(cls, inputs, attr, params): dtype = attr.get("dtype", None) high = attr.get("high", 1.0) low = attr.get("low", 0.0) seed = attr.get("seed", None) shape = infer_shape(inputs[0]) if dtype is None: dtype = infer_type(inputs[0]).checked_type.dtype else: dtype = get_type(dtype) assert dtype in [ "float32", "float64", ], "Only float random value generation is currently supported." if seed is None: seed = np.random.randint(1e6) else: seed = int(seed) key = _random.threefry_key(seed) output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high) _, vals = _expr.TupleWrapper(output, 2) return vals class NegativeLogLikelihoodLoss(OnnxOpConverter): """Operator converter for NegativeLogLikehoodLoss""" VALID_REDUCTIONS = {"mean", "sum", "none"} @classmethod def run_calculation( cls: "NegativeLogLikelihoodLoss", input_tensor: relay.Expr, target_tensor: relay.Expr, weight_tensor: Optional[relay.Expr], ignore_index: int, ): """Run calculation for NegativeLogLikelihood, returning output tensor and weight tensor used for mean-style reductions. """ # Convert negative indices --> positive indices for gather ops, note we have to # use the original target tensor to interact with ignore_index to have proper behavior. 
normalized_target_tensor = normalize_gather_indices(input_tensor, target_tensor, 1) if weight_tensor is None: channels = infer_shape(input_tensor)[1] weight_tensor = relay.ones( [channels], dtype=infer_type(input_tensor).checked_type.dtype, ) loss = -relay.gather( input_tensor, axis=1, indices=relay.expand_dims(normalized_target_tensor, 1), ) loss = relay.squeeze(loss, axis=[1]) expanded_normalized_target_tensor = relay.expand_dims(normalized_target_tensor, 0) expanded_normalized_target_tensor = relay.nn.batch_flatten( expanded_normalized_target_tensor ) flattened_weights = relay.gather_nd(weight_tensor, expanded_normalized_target_tensor) select_weights = relay.reshape_like(flattened_weights, loss) loss *= select_weights if ignore_index is not None: # "Ignore" values whose target is the ignore_index mask_tensor = relay.equal( target_tensor, relay.const(ignore_index, dtype=target_tensor.type_annotation.dtype) ) mask_tensor = relay.const(1, dtype="int8") - relay.cast(mask_tensor, "int8") loss = relay.where( mask_tensor, loss, relay.const(0, infer_type(loss).checked_type.dtype) ) # This is not explained super clearly in the onnx spec, but masked values don't # contribute toward the final value in reduction select_weights *= relay.cast_like(mask_tensor, select_weights) weight_total = relay.sum(select_weights) return loss, weight_total @classmethod def _impl_v13(cls, inputs, attr, params): ignore_index = attr.get("ignore_index", None) reduction = attr.get("reduction", b"mean").decode("utf-8") if reduction not in cls.VALID_REDUCTIONS: raise ValueError( f"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}" ) input_tensor, target_tensor = inputs[0], inputs[1] if len(inputs) == 3: weight_tensor = inputs[2] else: weight_tensor = None loss, weight_total = cls.run_calculation( input_tensor, target_tensor, weight_tensor=weight_tensor, ignore_index=ignore_index, ) if reduction == "mean": return relay.sum(loss) / weight_total if reduction == "sum": return relay.sum(loss) # Case reduction == 'none' return loss class SoftmaxCrossEntropyLoss(OnnxOpConverter): """Operator converter for SCE_loss""" @classmethod def _impl_v13(cls, inputs, attr, params): ignore_index = attr.get("ignore_index", None) reduction = attr.get("reduction", b"mean").decode("utf-8") input_tensor, target_tensor = inputs[0], inputs[1] if len(inputs) == 3: weight_tensor = inputs[2] else: weight_tensor = None get_log_prob = attr["tvm_custom"]["num_outputs"] == 2 log_softmax_tensor = LogSoftmax.run_calculation(input_tensor, axes=[1]) loss, weight_total = NegativeLogLikelihoodLoss.run_calculation( log_softmax_tensor, target_tensor, weight_tensor, ignore_index=ignore_index, ) if reduction == "mean": loss = relay.sum(loss) / weight_total elif reduction == "sum": loss = relay.sum(loss) if get_log_prob: return relay.TupleWrapper(relay.Tuple((loss, log_softmax_tensor)), 2) return loss class Adagrad(OnnxOpConverter): """Operator converter for adagrad op.""" @classmethod def _impl_v1(cls, inputs, attr, params): decay_factor = attr.get("decay_factor", 0.0) epsilon = attr.get("epsilon", 0.0) norm_coefficient = attr.get("norm_coefficient", 0.0) R = inputs[0] T = inputs[1] # convert attributes to constants, proper types dtype_inputs = infer_type(inputs[3]).checked_type.dtype decay_factor = relay.const(decay_factor, dtype=dtype_inputs) epsilon = relay.const(epsilon, dtype=dtype_inputs) norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs) T = relay.cast_like(T, inputs[3]) assert ( len(inputs) - 2 ) % 3 == 0, f"Expect 
triplets for remaining inputs, found {len(inputs) - 2}" # Remaining inputs are: # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_sq_g, x_2_sq_g...] num_input_tensors = (len(inputs) - 2) // 3 output_tensors = [] output_accumulated_squared_gradients = [] for i in range(num_input_tensors): x = inputs[i + 2] gradient = inputs[i + 2 + num_input_tensors] accumulated_squared_gradient = inputs[i + 2 + 2 * num_input_tensors] r = R / (relay.const(1.0, dtype=dtype_inputs) + T * decay_factor) g_regularized = norm_coefficient * x + gradient new_accumulated_squared_gradient = ( accumulated_squared_gradient + g_regularized * g_regularized ) h_adaptive = relay.sqrt(new_accumulated_squared_gradient) + epsilon x_new = x - r * g_regularized / h_adaptive output_tensors.append(x_new) output_accumulated_squared_gradients.append(new_accumulated_squared_gradient) # append lists together, momentums come after result tensors result = output_tensors + output_accumulated_squared_gradients return _expr.TupleWrapper(_expr.Tuple(result), len(result)) class Adam(OnnxOpConverter): """Operator converter for Adam op.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = attr.get("alpha", 0.9) beta = attr.get("beta", 0.999) # Note in the docs epsilon default is 0.0 but in the tests it is set to 1e-2: # https://git.io/Ju5C4 epsilon = attr.get("epsilon", 1e-2) norm_coefficient = attr.get("norm_coefficient", 0.0) norm_coefficient_post = attr.get("norm_coefficient_post", 0.0) R = inputs[0] T = inputs[1] assert ( len(inputs) - 2 ) % 4 == 0, f"Expect 4-lets for remaining inputs, found {len(inputs) - 2}" # convert attributes to constants, proper types dtype_inputs = infer_type(inputs[3]).checked_type.dtype inverse_alpha = relay.const(1 - alpha, dtype=dtype_inputs) alpha = relay.const(alpha, dtype=dtype_inputs) inverse_beta = relay.const(1 - beta, dtype=dtype_inputs) beta = relay.const(beta, dtype=dtype_inputs) epsilon = relay.const(epsilon, dtype=dtype_inputs) norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs) norm_coefficient_post = relay.const(norm_coefficient_post, dtype=dtype_inputs) one = relay.const(1, dtype=dtype_inputs) T = relay.cast_like(T, inputs[3]) # Remaining inputs are: # [x_1, x_2 ..., x_1_grad, x_2_grad, ... x_1_g_accum, x_2_g_accum..., x_1_g_sq_accum, ...] 
num_input_tensors = (len(inputs) - 2) // 4 output_tensors = [] output_accumulated_gradients = [] output_accumulated_squared_gradients = [] for i in range(num_input_tensors): x = inputs[i + 2] g = inputs[i + 2 + num_input_tensors] v = inputs[i + 2 + 2 * num_input_tensors] h = inputs[i + 2 + 3 * num_input_tensors] g_regularized = norm_coefficient * x + g v_new = alpha * v + inverse_alpha * g_regularized h_new = beta * h + inverse_beta * g_regularized * g_regularized h_sqrt = relay.sqrt(h_new) + epsilon true_branch = R * relay.sqrt(one - relay.power(beta, T)) / (one - relay.power(alpha, T)) R_adjusted = relay.If(T > relay.const(0, dtype=dtype_inputs), true_branch, R) x_new = x - R_adjusted * (v_new / h_sqrt) x_result = (one - norm_coefficient_post) * x_new output_tensors.append(x_result) output_accumulated_gradients.append(v_new) output_accumulated_squared_gradients.append(h_new) # append lists together to get final result result = ( output_tensors + output_accumulated_gradients + output_accumulated_squared_gradients ) return _expr.TupleWrapper(_expr.Tuple(result), len(result)) class Momentum(OnnxOpConverter): """Operator converter for Momentum op.""" @classmethod def _impl_v1(cls, inputs, attr, params): alpha = attr["alpha"] beta = attr["beta"] mode = attr["mode"].decode("utf-8") norm_coefficient = attr["norm_coefficient"] assert mode in ["nesterov", "standard"], f"Unknown momentum mode {mode}" R = inputs[0] T = inputs[1] assert ( len(inputs) - 2 ) % 3 == 0, f"Expect triplets for remaining inputs, found {len(inputs) - 2}" # Remaining inputs are: # [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_momentum, x_2_momentum...] num_input_tensors = (len(inputs) - 2) // 3 # convert attributes to constants dtype_inputs = infer_type(inputs[3]).checked_type.dtype alpha = relay.const(alpha, dtype=dtype_inputs) beta = relay.const(beta, dtype=dtype_inputs) norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs) default_beta = relay.const(1.0, dtype=dtype_inputs) # Calculate updated values for every input output_tensors = [] output_momentums = [] for i in range(num_input_tensors): x = inputs[i + 2] gradient = inputs[i + 2 + num_input_tensors] momentum = inputs[i + 2 + 2 * num_input_tensors] g_regularized = norm_coefficient * x + gradient beta_adjusted = relay.If(T > relay.const(0, dtype="int64"), beta, default_beta) new_momentum = alpha * momentum + beta_adjusted * g_regularized if mode == "standard": x_output = x - R * new_momentum else: # mode == 'nesterov' x_output = x - R * (g_regularized + alpha * new_momentum) output_tensors.append(x_output) output_momentums.append(new_momentum) # append lists together, momentums come after result tensors result = output_tensors + output_momentums return _expr.TupleWrapper(_expr.Tuple(result), len(result)) # compatible operators that do NOT require any conversion. _identity_list = [] # _convert_map defines maps of name to converter functor(callable) # for 1 to 1 mapping, use Renamer if nothing but name is different # use AttrCvt if attributes need to be converted # for 1 to N mapping(composed), use custom callable functions # for N to 1 mapping, currently not supported(?) 
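# Illustrative sketch only: the conventions above in practice. A pure rename is
# registered as `"Neg": Renamer("negative")` and an attribute remap as
# `"Transpose": AttrCvt("transpose", {"perm": "axes"})` (both appear in the map
# below); anything more involved subclasses OnnxOpConverter. The class below is
# a hypothetical, unused example of that last pattern and is NOT registered in
# _get_convert_map.
class _ExampleCopy(OnnxOpConverter):
    """Illustrative converter that forwards its single input unchanged."""

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # A converter receives the relay input expressions, the ONNX attribute
        # dict and the params dict, and returns a relay expression (or a
        # TupleWrapper for multi-output ops).
        return inputs[0]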
def _get_convert_map(opset): return { # defs/experimental "Identity": Renamer("copy"), "Affine": Affine.get_converter(opset), "BitShift": BitShift.get_converter(opset), "ThresholdedRelu": ThresholdedRelu.get_converter(opset), "ScaledTanh": ScaledTanh.get_converter(opset), "ParametricSoftplus": ParametricSoftPlus.get_converter(opset), "Constant": Constant.get_converter(opset), "ConstantOfShape": ConstantOfShape.get_converter(opset), # 'GivenTensorFill' "FC": AttrCvt("dense", ignores=["axis", "axis_w"]), "Scale": Scale.get_converter(opset), # 'GRUUnit' # 'ATen' # 'ImageScaler' # 'MeanVarianceNormalization' # 'Crop' # 'Embedding' "Upsample": Upsample.get_converter(opset), "SpatialBN": BatchNorm.get_converter(opset), # defs/generator # 'Constant' # Implemented # 'RandomUniform' # 'RandomNormal' # 'RandomUniformLike' # 'RandomNormalLike' # defs/logical # defs/math "Add": Add.get_converter(opset), "Sub": Sub.get_converter(opset), "Mul": Mul.get_converter(opset), "Div": Div.get_converter(opset), "Neg": Renamer("negative"), "Abs": Absolute.get_converter(opset), "Reciprocal": Reciprocal.get_converter(opset), "Floor": Renamer("floor"), "Ceil": Renamer("ceil"), "Round": Renamer("round"), "IsInf": IsInf.get_converter(opset), "IsNaN": Renamer("isnan"), "Sqrt": Renamer("sqrt"), "Relu": Renamer("relu"), "Celu": Celu.get_converter(opset), "LeakyRelu": Renamer("leaky_relu"), "Selu": Selu.get_converter(opset), "Elu": Elu.get_converter(opset), "Exp": Renamer("exp"), "Greater": Renamer("greater"), "GreaterOrEqual": Renamer("greater_equal"), "Less": Renamer("less"), "LessOrEqual": Renamer("less_equal"), "Log": Renamer("log"), "Acos": Renamer("acos"), "Acosh": Renamer("acosh"), "Asin": Renamer("asin"), "Asinh": Renamer("asinh"), "Atan": Renamer("atan"), "Atanh": Renamer("atanh"), "Cos": Renamer("cos"), "Cosh": Renamer("cosh"), "Sin": Renamer("sin"), "Sinh": Renamer("sinh"), "Tan": Renamer("tan"), "Tanh": Renamer("tanh"), "Pow": Pow.get_converter(opset), "PRelu": Prelu.get_converter(opset), "Sigmoid": Renamer("sigmoid"), "HardSigmoid": HardSigmoid.get_converter(opset), "Max": Maximum.get_converter(opset), "Min": Minimum.get_converter(opset), "Sum": Sum.get_converter(opset), "Mean": Mean.get_converter(opset), "Clip": Clip.get_converter(opset), "Softplus": Softplus.get_converter(opset), # softmax default axis is different in onnx "Softmax": Softmax.get_converter(opset), "LogSoftmax": LogSoftmax.get_converter(opset), "OneHot": OneHot.get_converter(opset), "Hardmax": Hardmax.get_converter(opset), "Shrink": Shrink.get_converter(opset), "Softsign": Softsign.get_converter(opset), "Gemm": Gemm.get_converter(opset), "MatMul": MatMul.get_converter(opset), "MatMulInteger16": MatMulInteger16.get_converter(opset), "Mod": Mod.get_converter(opset), "Xor": Renamer("logical_xor"), # defs/nn "AveragePool": AveragePool.get_converter(opset), "LpPool": LpPool.get_converter(opset), "GlobalLpPool": GlobalLpPool.get_converter(opset), "MaxPool": MaxPool.get_converter(opset), "MaxUnpool": MaxUnpool.get_converter(opset), "Conv": Conv.get_converter(opset), "ConvTranspose": ConvTranspose.get_converter(opset), "GlobalAveragePool": GlobalAveragePool.get_converter(opset), "GlobalMaxPool": GlobalMaxPool.get_converter(opset), "BatchNormalization": BatchNorm.get_converter(opset), "InstanceNormalization": InstanceNorm.get_converter(opset), # 'LpNormalization' "Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]), "Flatten": Flatten.get_converter(opset), "LRN": LRN.get_converter(opset), # Recurrent Layers "LSTM": 
LSTM.get_converter(opset), "GRU": GRU.get_converter(opset), # defs/vision "MaxRoiPool": MaxRoiPool.get_converter(opset), "RoiAlign": RoiAlign.get_converter(opset), "NonMaxSuppression": NonMaxSuppression.get_converter(opset), # defs/reduction "ReduceMax": ReduceMax.get_converter(opset), "ReduceMin": ReduceMin.get_converter(opset), "ReduceSum": ReduceSum.get_converter(opset), "ReduceMean": ReduceMean.get_converter(opset), "ReduceProd": ReduceProd.get_converter(opset), "ReduceLogSumExp": ReduceLogSumExp.get_converter(opset), "ReduceLogSum": ReduceLogSum.get_converter(opset), "ReduceSumSquare": ReduceSumSquare.get_converter(opset), "ReduceL1": ReduceL1.get_converter(opset), "ReduceL2": ReduceL2.get_converter(opset), # defs/sorting "ArgMax": ArgMax.get_converter(opset), "ArgMin": ArgMin.get_converter(opset), "TopK": TopK.get_converter(opset), # defs/tensor "Cast": Cast.get_converter(opset), "Reshape": Reshape.get_converter(opset), "Expand": Expand.get_converter(opset), "Concat": Concat.get_converter(opset), "Split": Split.get_converter(opset), "Slice": Slice.get_converter(opset), "Transpose": AttrCvt("transpose", {"perm": "axes"}), "DepthToSpace": DepthToSpace.get_converter(opset), "SpaceToDepth": SpaceToDepth.get_converter(opset), "Gather": Gather.get_converter(opset), "GatherElements": GatherElements.get_converter(opset), "GatherND": GatherND.get_converter(opset), "Compress": Compress.get_converter(opset), "Size": AttrCvt("ndarray_size", extras={"dtype": "int64"}), "Scatter": Scatter.get_converter(opset), "ScatterElements": Scatter.get_converter(opset), "ScatterND": ScatterND.get_converter(opset), "EyeLike": EyeLike.get_converter(opset), "Squeeze": Squeeze.get_converter(opset), "Unsqueeze": Unsqueeze.get_converter(opset), "Pad": Pad.get_converter(opset), "Shape": Shape.get_converter(opset), "Sign": Sign.get_converter(opset), "Equal": Equal.get_converter(opset), "Not": Not.get_converter(opset), "And": And.get_converter(opset), "Tile": Tile.get_converter(opset), "Erf": Erf.get_converter(opset), "Where": Where.get_converter(opset), "Or": Or.get_converter(opset), "Resize": Resize.get_converter(opset), "NonZero": NonZero.get_converter(opset), "Range": Range.get_converter(opset), "CumSum": CumSum.get_converter(opset), "Unique": Unique.get_converter(opset), "Einsum": Einsum.get_converter(opset), # defs/control_flow "Loop": Loop.get_converter(opset), "If": If.get_converter(opset), # Torch ATen Dispatcher. "ATen": ATen.get_converter(opset), # Quantization "QuantizeLinear": QuantizeLinear.get_converter(opset), "DequantizeLinear": DequantizeLinear.get_converter(opset), "DynamicQuantizeLinear": DynamicQuantizeLinear.get_converter(opset), "ReverseSequence": ReverseSequence.get_converter(opset), "QLinearConv": QLinearConv.get_converter(opset), "QLinearConcat": QLinearConcat.get_converter(opset), "QLinearAdd": QLinearAdd.get_converter(opset), "QLinearMatMul": QLinearMatMul.get_converter(opset), "QLinearMul": QLinearMul.get_converter(opset), "QLinearSigmoid": QLinearSigmoid.get_converter(opset), "ConvInteger": ConvInteger.get_converter(opset), "QLinearAveragePool": QLinearAveragePool.get_converter(opset), "QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset), "QLinearLeakyRelu": QLinearLeakyRelu.get_converter(opset), # Random number generation. 
"RandomNormal": RandomNormal.get_converter(opset), "RandomNormalLike": RandomNormalLike.get_converter(opset), "RandomUniform": RandomUniform.get_converter(opset), "RandomUniformLike": RandomUniformLike.get_converter(opset), # Loss functions / training "NegativeLogLikelihoodLoss": NegativeLogLikelihoodLoss.get_converter(opset), "SoftmaxCrossEntropyLoss": SoftmaxCrossEntropyLoss.get_converter(opset), "Adagrad": Adagrad.get_converter(opset), "Adam": Adam.get_converter(opset), "Momentum": Momentum.get_converter(opset), "Scan": Scan.get_converter(opset), } class GraphProto: """A helper class for handling Relay expression copying from pb2.GraphProto. Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto Parameters ---------- shape : dict of str to tuple, optional The input shape to the graph dtype : str or dict of str to str The input types to the graph freeze_params: bool If this parameter is true, the importer will take any provided onnx input values (weights, shapes, etc) and embed them into the relay model as Constants instead of variables. This allows more aggressive optimizations at compile time and helps in making models static if certain inputs represent attributes relay would traditionally consider compile-time constants. """ current = None def __init__(self, shape, dtype, freeze_params=False): self._nodes = {} self._params = {} self._inputs = {} self._renames = {} self._num_input = 0 self._num_param = 0 self._shape = shape.copy() if shape else {} self._input_names = [] self._dtype = dtype self.opset = None self._freeze_params = freeze_params def __enter__(self): self._old_manager = GraphProto.current GraphProto.current = self return self def __exit__(self, ptype, value, trace): GraphProto.current = self._old_manager def freeze(self, func, params): bind_map = {} for name in params.keys(): if name in self._nodes.keys(): bind_map[self._nodes[name]] = _expr.const(params[name]) body = _expr.bind(func.body, bind_map) fn = _function.Function(analysis.free_vars(body), body) return fn, {} def from_onnx(self, graph, opset, get_output_expr=False): """Construct Relay expression from ONNX graph. Onnx graph is a python protobuf object. The companion parameters will be handled automatically. However, the input names from onnx graph is vague, mixing inputs and network weights/bias such as "1", "2"... For convenience, we rename the `real` input names to "input_0", "input_1"... And renaming parameters to "param_0", "param_1"... Parameters ---------- graph : onnx protobuf object The loaded onnx graph opset : opset version get_output_expr: bool If set to true, this conversion will return each output expression rather than a packaged module. This can be useful when converting subgraphs to relay. 
Returns ------- mod : tvm.IRModule The returned relay module params : dict A dict of name: tvm.nd.array pairs, used as pretrained weights """ self.opset = opset # parse network inputs to relay, aka parameters for init_tensor in graph.initializer: if not init_tensor.name.strip(): raise ValueError("Tensor's name is required.") array = self._parse_array(init_tensor) if self._freeze_params: self._nodes[init_tensor.name] = _expr.const(array) else: self._params[init_tensor.name] = array self._nodes[init_tensor.name] = new_var( init_tensor.name, shape=self._params[init_tensor.name].shape, dtype=self._params[init_tensor.name].dtype, ) for i in graph.input: # from onnx v0.2, GraphProto.input has type ValueInfoProto, # and the name is 'i.name' i_name, i_shape, d_type, i_shape_name = get_info(i) if i_name in self._params: # i is a param instead of input self._num_param += 1 self._params[i_name] = self._params.pop(i_name) self._nodes[i_name] = new_var( i_name, shape=self._params[i_name].shape, dtype=self._params[i_name].dtype ) elif i_name in self._nodes: continue else: self._num_input += 1 self._input_names.append(i_name) if i_name in self._shape: i_shape = self._shape[i_name] else: if "?" in str(i_shape): warning_msg = ( "Input %s has unknown dimension shapes: %s. " "Specifying static values may improve performance" % (i_name, str(i_shape_name)) ) warnings.warn(warning_msg) if isinstance(self._dtype, dict): dtype = self._dtype[i_name] if i_name in self._dtype else d_type else: dtype = d_type self._nodes[i_name] = new_var(i_name, shape=i_shape, dtype=dtype) self._inputs[i_name] = self._nodes[i_name] # Only check user inputs in the outer-most graph scope. if self._old_manager is None: assert all( [name in self._input_names for name in self._shape.keys()] ), "User specified the shape for inputs that weren't found in the graph: " + str( self._shape ) # get list of unsupported ops convert_map = _get_convert_map(opset) unsupported_ops = set() for node in graph.node: op_name = node.op_type if ( op_name not in convert_map and op_name != "Constant" and op_name not in _identity_list ): unsupported_ops.add(op_name) if unsupported_ops: msg = "The following operators are not supported for frontend ONNX: " msg += ", ".join(unsupported_ops) raise tvm.error.OpNotImplemented(msg) # construct nodes, nodes are stored as directed acyclic graph for node in graph.node: op_name = node.op_type attr = self._parse_attr(node.attribute) # Create and populate input list. inputs = onnx_input() for i in node.input: if i != "": inputs.append(self._nodes[self._renames.get(i, i)]) else: inputs.append(None) i_name = self._parse_value_proto(node) node_output = self._fix_outputs(op_name, node.output) attr["tvm_custom"] = {} attr["tvm_custom"]["name"] = i_name attr["tvm_custom"]["num_outputs"] = len(node_output) op = self._convert_operator(op_name, inputs, attr, opset) if not isinstance(op, _expr.TupleWrapper): outputs_num = 1 else: outputs_num = len(op) if outputs_num == 1: op = fold_constant(op) else: op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op)) if outputs_num > 1: # ONNX supports optional outputs for some nodes. 
# This block searches for missing outputs in the ONNX graph # and removes any unneeded ops valid_outputs = [False] * outputs_num for i, output in enumerate(node_output): if output != "": valid_outputs[i] = True # If we have outputs ONNX isn't expecting, we need to drop them if not all(valid_outputs): tup = op.astuple() # TupleWrapper can also wrap ops with TupleType outputs if isinstance(tup, _expr.Tuple): # For tuples, we extract the fields instead of using GetTupleItem outputs = [tup.fields[i] for i, valid in enumerate(valid_outputs) if valid] else: # For call nodes, we need to GetTupleItem outputs = [op[i] for i, valid in enumerate(valid_outputs) if valid] # Create the new op with valid outputs if len(outputs) == 1: op = outputs[0] elif len(outputs) != outputs_num: op = _expr.TupleWrapper(_expr.Tuple(outputs), len(outputs)) # Drop invalid outputs for the onnx node outputs_num = len(outputs) node_output = [output for output in node_output if output != ""] assert ( len(node_output) == outputs_num ), "Number of output mismatch {} vs {} in {}.".format( len(node_output), outputs_num, op_name ) if outputs_num == 1: self._nodes[node_output[0]] = op else: for k, i in zip(list(node_output), range(len(node_output))): self._nodes[k] = op[i] # now return the outputs outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) # If requested, directly return the converted expressions. if get_output_expr: return outputs ## Maintain the order of inputs and parameters from the ONNX graph, but only include ## those parameters that are needed to execute the relay graph free_vars = analysis.free_vars(outputs) nodes = {v: k for k, v in self._nodes.items()} free_vars = [nodes[var] for var in free_vars] for i_name in self._params: if i_name in free_vars and i_name not in self._inputs: self._inputs[i_name] = self._nodes[i_name] # Create a function from our output expression and all input variables. func = _function.Function([v for k, v in self._inputs.items()], outputs) return IRModule.from_expr(func), self._params def _parse_value_proto(self, value_proto): """Parse ValueProto or raw str.""" try: name = value_proto.name except AttributeError: name = value_proto return name def _parse_array(self, tensor_proto): np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims)) return _nd.array(np_array) def _parse_attr(self, attr_proto): """Convert a list of AttributeProto to a dict, with names as keys.""" attrs = {} for a in attr_proto: for f in ["f", "i", "s", "g"]: if a.HasField(f): attrs[a.name] = getattr(a, f) for f in ["floats", "ints", "strings"]: if list(getattr(a, f)): assert a.name not in attrs, "Only one type of attr is allowed" attrs[a.name] = tuple(getattr(a, f)) for f in ["t"]: if a.HasField(f): attrs[a.name] = getattr(a, f) for f in ["tensors"]: if list(getattr(a, f)): assert a.name not in attrs, "Only one type of attr is allowed" attrs[a.name] = tuple(getattr(a, f)) for f in ["graphs"]: if list(getattr(a, f)): raise NotImplementedError("Field {} is not supported in relay.".format(f)) if a.name not in attrs: raise ValueError("Cannot parse attribute: \n{}\n.".format(a)) return attrs def _convert_operator(self, op_name, inputs, attrs, opset): """Convert ONNX operator into a Relay operator. The converter must specify conversions explicitly for incompatible name, and apply handlers to operator attributes. 
Parameters ---------- op_name : str Operator name, such as Convolution, FullyConnected inputs : list of tvm.relay.function.Function List of inputs. attrs : dict Dict of operator attributes opset : int Opset version Returns ------- sym : tvm.relay.function.Function Converted relay function """ convert_map = _get_convert_map(opset) if op_name in _identity_list: sym = get_relay_op(op_name)(*inputs, **attrs) elif op_name in convert_map: sym = convert_map[op_name](inputs, attrs, self._params) else: raise NotImplementedError("Operator {} not implemented.".format(op_name)) return sym def _fix_outputs(self, op_name, outputs): """A hack to handle dropout or similar operator that have more than one out in ONNX. """ if op_name == "Dropout": if len(outputs) == 1: return outputs # TODO(zhreshold): support dropout mask? outputs = outputs[:-1] return outputs def from_onnx( model, shape=None, dtype="float32", opset=None, freeze_params=False, convert_config=None ): """Convert a ONNX model into an equivalent Relay Function. ONNX graphs are represented as Python Protobuf objects. The companion parameters will be handled automatically. However, the input names from onnx graph is vague, mixing inputs and network weights/bias such as "1", "2"... For convenience, we rename the `real` input names to "input_0", "input_1"... And renaming parameters to "param_0", "param_1"... By default, ONNX defines models in terms of dynamic shapes. The ONNX importer retains that dynamism upon import, and the compiler attempts to convert the model into a static shapes at compile time. If this fails, there may still be dynamic operations in the model. Not all TVM kernels currently support dynamic shapes, please file an issue on discuss.tvm.apache.org if you hit an error with dynamic kernels. Parameters ---------- model : protobuf object ONNX ModelProto after ONNX v1.1.0 shape : dict of str to tuple, optional The input shape to the graph dtype : str or dict of str to str The input types to the graph opset : int, optional Override to autodetected opset. This can be helpful for some testing. freeze_params: bool If this parameter is true, the importer will take any provided onnx input values (weights, shapes, etc) and embed them into the relay model as Constants instead of variables. This allows more aggressive optimizations at compile time and helps in making models static if certain inputs represent attributes relay would traditionally consider compile-time constants. convert_config : Optional[Dict[str, Any]] Default config: use_nt_batch_matmul : bool = True True to convert qualified onnx `matmul` to `nn.batch_matmul` strict to NT format (transpose_a=False, transpose_b=True). 
Returns ------- mod : tvm.IRModule The relay module for compilation params : dict of str to tvm.nd.NDArray The parameter dict to be used by relay """ global ONNX_DEFAULT_CONFIGS if convert_config is not None: ONNX_DEFAULT_CONFIGS.update(convert_config) try: import onnx if hasattr(onnx.checker, "check_model"): # try use onnx's own model checker before converting any model try: onnx.checker.check_model(model) except Exception as e: # pylint: disable=c-extension-no-member, broad-except # the checker is a bit violent about errors, so simply print warnings here warnings.warn(str(e)) except ImportError: pass g = GraphProto(shape, dtype, freeze_params) graph = model.graph try: opset_in_model = model.opset_import[0].version if model.opset_import else 1 except AttributeError: opset_in_model = 1 if opset is None: opset = opset_in_model elif opset < opset_in_model: warnings.warn( "" f"You are overwritting original opset ver = {opset_in_model} by lower ver = {opset}. " f"That might cause model conversion errors." ) # Use the graph proto as a scope so that ops can access other nodes if needed. with g: mod, params = g.from_onnx(graph, opset) return mod, params
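# Illustrative usage sketch (not part of the importer itself). The model path
# and input name below are hypothetical; the keys of `shape` must match the
# graph's input names, as asserted in GraphProto.from_onnx above. Users
# normally reach this function as `tvm.relay.frontend.from_onnx`:
#
#     import onnx
#     onnx_model = onnx.load("model.onnx")
#     shape_dict = {"input_0": (1, 3, 224, 224)}
#     mod, params = from_onnx(onnx_model, shape=shape_dict, freeze_params=True)
#
# Passing freeze_params=True embeds the ONNX initializers as relay constants,
# matching the behaviour described in the docstrings above.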
apache-2.0
thientu/scikit-learn
benchmarks/bench_plot_parallel_pairwise.py
295
1247
# Author: Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause import time import pylab as pl from sklearn.utils import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_kernels def plot(func): random_state = check_random_state(0) one_core = [] multi_core = [] sample_sizes = range(1000, 6000, 1000) for n_samples in sample_sizes: X = random_state.rand(n_samples, 300) start = time.time() func(X, n_jobs=1) one_core.append(time.time() - start) start = time.time() func(X, n_jobs=-1) multi_core.append(time.time() - start) pl.figure('scikit-learn parallel %s benchmark results' % func.__name__) pl.plot(sample_sizes, one_core, label="one core") pl.plot(sample_sizes, multi_core, label="multi core") pl.xlabel('n_samples') pl.ylabel('Time (s)') pl.title('Parallel %s' % func.__name__) pl.legend() def euclidean_distances(X, n_jobs): return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs) def rbf_kernels(X, n_jobs): return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1) plot(euclidean_distances) plot(rbf_kernels) pl.show()
bsd-3-clause
szagoruyko/pyinn
test/benchmark.py
1
1957
import torch import torch.nn.functional as F from torch.autograd import Variable from torch.nn.init import kaiming_normal from pyinn import conv2d_depthwise from torchnet.meter import TimeMeter from torch.backends import cudnn cudnn.benchmark = True def mobilenet(depth, width, depthwise_function): cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024] cast = lambda x: x.cuda() ni = 32 params = {'conv0': cast(kaiming_normal(torch.Tensor(ni, 3, 3, 3)))} for i, x in enumerate(cfg): no = x if isinstance(x, int) else x[0] params['block%d.conv0' % i] = cast(kaiming_normal(torch.Tensor(ni, 1, 3, 3))) params['block%d.conv1' % i] = cast(kaiming_normal(torch.Tensor(no, ni, 1, 1))) ni = no params = {k: Variable(v, requires_grad=True) for k, v in params.items()} def f(input, params): o = F.conv2d(input, params['conv0'], padding=1, stride=2) o = F.relu(o, inplace=True) for i, x in enumerate(cfg): stride = 1 if isinstance(x, int) else x[1] o = depthwise_function(o, params['block%d.conv0' % i], stride=stride, padding=1) o = F.conv2d(o, params['block%d.conv1' % i]) o = F.relu(o, inplace=True) return o return f, params def fconv2d(x, w, stride, padding): return F.conv2d(x, w, stride=stride, padding=padding, groups=x.size(1)) x = torch.autograd.Variable(torch.randn(256,3,224,224).cuda()) f_pyinn, params = mobilenet(18, 1, conv2d_depthwise) f_torch, params = mobilenet(18, 1, fconv2d) # warmup f_pyinn(x, params).sum().backward() f_torch(x, params).sum().backward() torch.cuda.synchronize() meter = TimeMeter('s') for i in range(10): f_torch(x, params).sum().backward() torch.cuda.synchronize() print(meter.value()) meter.reset() for i in range(10): f_pyinn(x, params).sum().backward() torch.cuda.synchronize() print(meter.value())
mit
Tatsh-ansible/ansible
lib/ansible/modules/system/beadm.py
8
11657
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Adam Števko <adam.stevko@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: beadm short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems. description: - Create, delete or activate ZFS boot environments. - Mount and unmount ZFS boot environments. version_added: "2.3" author: Adam Števko (@xen0l) options: name: description: - ZFS boot environment name. aliases: [ "be" ] required: True snapshot: description: - If specified, the new boot environment will be cloned from the given snapshot or inactive boot environment. required: false default: false description: description: - Associate a description with a new boot environment. This option is available only on Solarish platforms. required: false default: false options: description: - Create the datasets for new BE with specific ZFS properties. Multiple options can be specified. This option is available only on Solarish platforms. required: false default: false mountpoint: description: - Path where to mount the ZFS boot environment required: false default: false state: description: - Create or delete ZFS boot environment. required: false default: "present" choices: [ "present", "absent", "activated", "mounted", "unmounted" ] force: description: - Specifies if the unmount should be forced. required: false default: false choices: [ "true", "false" ] ''' EXAMPLES = ''' - name: Create ZFS boot environment beadm: name: upgrade-be state: present - name: Create ZFS boot environment from existing inactive boot environment beadm: name: upgrade-be snapshot: be@old state: present - name: Create ZFS boot environment with compression enabled and description "upgrade" beadm: name: upgrade-be options: "compression=on" description: upgrade state: present - name: Delete ZFS boot environment beadm: name: old-be state: absent - name: Mount ZFS boot environment on /tmp/be beadm: name: BE mountpoint: /tmp/be state: mounted - name: Unmount ZFS boot environment beadm: name: BE state: unmounted - name: Activate ZFS boot environment beadm: name: upgrade-be state: activated ''' RETURN = ''' name: description: BE name returned: always type: string sample: pre-upgrade snapshot: description: ZFS snapshot to create BE from returned: always type: string sample: rpool/ROOT/oi-hipster@fresh description: description: BE description returned: always type: string sample: Upgrade from 9.0 to 10.0 options: description: BE additional options returned: always type: string sample: compression=on mountpoint: description: BE mountpoint returned: always type: string sample: /mnt/be state: description: state of the target returned: always type: string sample: present force: description: if forced action is wanted returned: always type: boolean sample: False ''' import os from ansible.module_utils.basic import AnsibleModule class BE(object): def __init__(self, module): self.module = module self.name = module.params['name'] self.snapshot = module.params['snapshot'] self.description = module.params['description'] self.options = module.params['options'] self.mountpoint = module.params['mountpoint'] self.state = module.params['state'] self.force = module.params['force'] self.is_freebsd = os.uname()[0] == 'FreeBSD' def _beadm_list(self): cmd = 
[self.module.get_bin_path('beadm')] cmd.append('list') cmd.append('-H') if not self.is_freebsd: cmd.append(self.name) return self.module.run_command(cmd) def _find_be_by_name(self, out): for line in out.splitlines(): if line.split('\t')[0] == self.name: return line return None def exists(self): (rc, out, _) = self._beadm_list() if rc == 0: if self.is_freebsd: if self._find_be_by_name(out): return True else: return True else: return False def is_activated(self): (rc, out, _) = self._beadm_list() if rc == 0: if self.is_freebsd: line = self._find_be_by_name(out) if line is not None and 'R' in line.split('\t')[1]: return True else: if 'R' in out.split(';')[2]: return True return False def activate_be(self): cmd = [self.module.get_bin_path('beadm')] cmd.append('activate') cmd.append(self.name) return self.module.run_command(cmd) def create_be(self): cmd = [self.module.get_bin_path('beadm')] cmd.append('create') if self.snapshot: cmd.append('-e') cmd.append(self.snapshot) if not self.is_freebsd: if self.description: cmd.append('-d') cmd.append(self.description) if self.options: cmd.append('-o') cmd.append(self.options) cmd.append(self.name) return self.module.run_command(cmd) def destroy_be(self): cmd = [self.module.get_bin_path('beadm')] cmd.append('destroy') cmd.append('-F') cmd.append(self.name) return self.module.run_command(cmd) def is_mounted(self): (rc, out, _) = self._beadm_list() if rc == 0: if self.is_freebsd: line = self._find_be_by_name(out) # On FreeBSD, we exclude currently mounted BE on /, as it is # special and can be activated even if it is mounted. That is not # possible with non-root BEs. if line.split('\t')[2] is not '-' and \ line.split('\t')[2] is not '/': return True else: if out.split(';')[3]: return True return False def mount_be(self): cmd = [self.module.get_bin_path('beadm')] cmd.append('mount') cmd.append(self.name) if self.mountpoint: cmd.append(self.mountpoint) return self.module.run_command(cmd) def unmount_be(self): cmd = [self.module.get_bin_path('beadm')] cmd.append('unmount') if self.force: cmd.append('-f') cmd.append(self.name) return self.module.run_command(cmd) def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['be'], type='str'), snapshot=dict(type='str'), description=dict(type='str'), options=dict(type='str'), mountpoint=dict(default=False, type='path'), state=dict( default='present', choices=['present', 'absent', 'activated', 'mounted', 'unmounted']), force=dict(default=False, type='bool'), ), supports_check_mode=True ) be = BE(module) rc = None out = '' err = '' result = {} result['name'] = be.name result['state'] = be.state if be.snapshot: result['snapshot'] = be.snapshot if be.description: result['description'] = be.description if be.options: result['options'] = be.options if be.mountpoint: result['mountpoint'] = be.mountpoint if be.state == 'absent': # beadm on FreeBSD and Solarish systems differs in delete behaviour in # that we are not allowed to delete activated BE on FreeBSD while on # Solarish systems we cannot delete BE if it is mounted. We add mount # check for both platforms as BE should be explicitly unmounted before # being deleted. On FreeBSD, we also check if the BE is activated. 
if be.exists(): if not be.is_mounted(): if module.check_mode: module.exit_json(changed=True) if be.is_freebsd: if be.is_activated(): module.fail_json(msg='Unable to remove active BE!') (rc, out, err) = be.destroy_be() if rc != 0: module.fail_json(msg='Error while destroying BE: "%s"' % err, name=be.name, stderr=err, rc=rc) else: module.fail_json(msg='Unable to remove BE as it is mounted!') elif be.state == 'present': if not be.exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = be.create_be() if rc != 0: module.fail_json(msg='Error while creating BE: "%s"' % err, name=be.name, stderr=err, rc=rc) elif be.state == 'activated': if not be.is_activated(): if module.check_mode: module.exit_json(changed=True) # On FreeBSD, beadm is unable to activate mounted BEs, so we add # an explicit check for that case. if be.is_freebsd: if be.is_mounted(): module.fail_json(msg='Unable to activate mounted BE!') (rc, out, err) = be.activate_be() if rc != 0: module.fail_json(msg='Error while activating BE: "%s"' % err, name=be.name, stderr=err, rc=rc) elif be.state == 'mounted': if not be.is_mounted(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = be.mount_be() if rc != 0: module.fail_json(msg='Error while mounting BE: "%s"' % err, name=be.name, stderr=err, rc=rc) elif be.state == 'unmounted': if be.is_mounted(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = be.unmount_be() if rc != 0: module.fail_json(msg='Error while unmounting BE: "%s"' % err, name=be.name, stderr=err, rc=rc) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
weinitom/robot
attention_tracker/dlib-18.18/python_examples/train_shape_predictor.py
10
5998
#!/usr/bin/python # The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt # # This example program shows how to use dlib's implementation of the paper: # One Millisecond Face Alignment with an Ensemble of Regression Trees by # Vahid Kazemi and Josephine Sullivan, CVPR 2014 # # In particular, we will train a face landmarking model based on a small # dataset and then evaluate it. If you want to visualize the output of the # trained model on some images then you can run the # face_landmark_detection.py example program with predictor.dat as the input # model. # # It should also be noted that this kind of model, while often used for face # landmarking, is quite general and can be used for a variety of shape # prediction tasks. But here we demonstrate it only on a simple face # landmarking task. # # COMPILING/INSTALLING THE DLIB PYTHON INTERFACE # You can install dlib using the command: # pip install dlib # # Alternatively, if you want to compile dlib yourself then go into the dlib # root folder and run: # python setup.py install # or # python setup.py install --yes USE_AVX_INSTRUCTIONS # if you have a CPU that supports AVX instructions, since this makes some # things run faster. # # Compiling dlib should work on any operating system so long as you have # CMake and boost-python installed. On Ubuntu, this can be done easily by # running the command: # sudo apt-get install libboost-python-dev cmake # # Also note that this example requires scikit-image which can be installed # via the command: # pip install scikit-image # Or downloaded from http://scikit-image.org/download.html. import os import sys import glob import dlib from skimage import io # In this example we are going to train a face detector based on the small # faces dataset in the examples/faces directory. This means you need to supply # the path to this faces folder as a command line argument so we will know # where it is. if len(sys.argv) != 2: print( "Give the path to the examples/faces directory as the argument to this " "program. For example, if you are in the python_examples folder then " "execute this program by running:\n" " ./train_shape_predictor.py ../examples/faces") exit() faces_folder = sys.argv[1] options = dlib.shape_predictor_training_options() # Now make the object responsible for training the model. # This algorithm has a bunch of parameters you can mess with. The # documentation for the shape_predictor_trainer explains all of them. # You should also read Kazemi's paper which explains all the parameters # in great detail. However, here I'm just setting three of them # differently than their default values. I'm doing this because we # have a very small dataset. In particular, setting the oversampling # to a high amount (300) effectively boosts the training set size, so # that helps this example. options.oversampling_amount = 300 # I'm also reducing the capacity of the model by explicitly increasing # the regularization (making nu smaller) and by using trees with # smaller depths. options.nu = 0.05 options.tree_depth = 2 options.be_verbose = True # dlib.train_shape_predictor() does the actual training. It will save the # final predictor to predictor.dat. The input is an XML file that lists the # images in the training dataset and also contains the positions of the face # parts. training_xml_path = os.path.join(faces_folder, "training_with_face_landmarks.xml") dlib.train_shape_predictor(training_xml_path, "predictor.dat", options) # Now that we have a model we can test it. 
dlib.test_shape_predictor() # measures the average distance between a face landmark output by the # shape_predictor and where it should be according to the truth data. print("\nTraining accuracy: {}".format( dlib.test_shape_predictor(training_xml_path, "predictor.dat"))) # The real test is to see how well it does on data it wasn't trained on. We # trained it on a very small dataset so the accuracy is not extremely high, but # it's still doing quite good. Moreover, if you train it on one of the large # face landmarking datasets you will obtain state-of-the-art results, as shown # in the Kazemi paper. testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml") print("Testing accuracy: {}".format( dlib.test_shape_predictor(testing_xml_path, "predictor.dat"))) # Now let's use it as you would in a normal application. First we will load it # from disk. We also need to load a face detector to provide the initial # estimate of the facial location. predictor = dlib.shape_predictor("predictor.dat") detector = dlib.get_frontal_face_detector() # Now let's run the detector and shape_predictor over the images in the faces # folder and display the results. print("Showing detections and predictions on the images in the faces folder...") win = dlib.image_window() for f in glob.glob(os.path.join(faces_folder, "*.jpg")): print("Processing file: {}".format(f)) img = io.imread(f) win.clear_overlay() win.set_image(img) # Ask the detector to find the bounding boxes of each face. The 1 in the # second argument indicates that we should upsample the image 1 time. This # will make everything bigger and allow us to detect more faces. dets = detector(img, 1) print("Number of faces detected: {}".format(len(dets))) for k, d in enumerate(dets): print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format( k, d.left(), d.top(), d.right(), d.bottom())) # Get the landmarks/parts for the face in box d. shape = predictor(img, d) print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1))) # Draw the face landmarks on the screen. win.add_overlay(shape) win.add_overlay(dets) dlib.hit_enter_to_continue()
bsd-3-clause
Fireblend/scikit-learn
sklearn/preprocessing/__init__.py
265
1319
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from ._function_transformer import FunctionTransformer from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import minmax_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'FunctionTransformer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', ]
bsd-3-clause
thientu/scikit-learn
benchmarks/bench_lasso.py
295
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
ningchi/scikit-learn
benchmarks/bench_glm.py
295
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
CenterForOpenScience/osf.io
addons/dataverse/routes.py
25
2739
from framework.routing import Rule, json_renderer from . import views api_routes = { 'rules': [ Rule( '/settings/dataverse/', 'get', views.dataverse_user_config_get, json_renderer, ), Rule( '/settings/dataverse/accounts/', 'post', views.dataverse_add_user_account, json_renderer, ), Rule( '/settings/dataverse/accounts/', 'get', views.dataverse_account_list, json_renderer, ), Rule( [ '/project/<pid>/dataverse/settings/', '/project/<pid>/node/<nid>/dataverse/settings/', ], 'get', views.dataverse_get_config, json_renderer, ), Rule( [ '/project/<pid>/dataverse/settings/', '/project/<pid>/node/<nid>/dataverse/settings/', ], 'post', views.dataverse_set_config, json_renderer, ), Rule( [ '/project/<pid>/dataverse/user-auth/', '/project/<pid>/node/<nid>/dataverse/user-auth/', ], 'put', views.dataverse_import_auth, json_renderer, ), Rule( [ '/project/<pid>/dataverse/user-auth/', '/project/<pid>/node/<nid>/dataverse/user-auth/', ], 'delete', views.dataverse_deauthorize_node, json_renderer, ), Rule( [ '/project/<pid>/dataverse/list-datasets/', '/project/<pid>/node/<nid>/dataverse/list-datasets/', ], 'post', views.dataverse_get_datasets, json_renderer, ), Rule( [ '/project/<pid>/dataverse/hgrid/root/', '/project/<pid>/node/<nid>/dataverse/hgrid/root/', ], 'get', views.dataverse_root_folder, json_renderer, ), Rule( [ '/project/<pid>/dataverse/publish/', '/project/<pid>/node/<nid>/dataverse/publish/', ], 'put', views.dataverse_publish_dataset, json_renderer, ), Rule( [ '/project/<pid>/dataverse/widget/contents/', '/project/<pid>/node/<nid>/dataverse/widget/contents/', ], 'get', views.dataverse_get_widget_contents, json_renderer, ), ], 'prefix': '/api/v1' }
apache-2.0
ningchi/scikit-learn
examples/text/document_clustering.py
31
8036
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn can be used to cluster documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features instead of standard numpy arrays. Two feature extraction methods can be used in this example: - TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most frequent words to features indices and hence compute a word occurrence frequency (sparse) matrix. The word frequencies are then reweighted using the Inverse Document Frequency (IDF) vector collected feature-wise over the corpus. - HashingVectorizer hashes word occurrences to a fixed dimensional space, possibly with collisions. The word count vectors are then normalized to each have l2-norm equal to one (projected to the euclidean unit-ball) which seems to be important for k-means to work in high dimensional space. HashingVectorizer does not provide IDF weighting as this is a stateless model (the fit method does nothing). When IDF weighting is needed it can be added by pipelining its output to a TfidfTransformer instance. Two algorithms are demoed: ordinary k-means and its more scalable cousin minibatch k-means. It can be noted that k-means (and minibatch k-means) are very sensitive to feature scaling and that in this case the IDF weighting helps improve the quality of the clustering by quite a lot as measured against the "ground truth" provided by the class label assignments of the 20 newsgroups dataset. This improvement is not visible in the Silhouette Coefficient which is small for both as this measure seem to suffer from the phenomenon called "Concentration of Measure" or "Curse of Dimensionality" for high dimensional datasets such as text data. Other measures such as V-measure and Adjusted Rand Index are information theoretic based evaluation scores: as they are only based on cluster assignments rather than distances, hence not affected by the curse of dimensionality. Note: as k-means is optimizing a non-convex objective function, it will likely end up in a local optimum. Several runs with independent random init might be necessary to get a good convergence. 
""" # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer from sklearn import metrics from sklearn.cluster import KMeans, MiniBatchKMeans import logging from optparse import OptionParser import sys from time import time import numpy as np # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--lsa", dest="n_components", type="int", help="Preprocess documents with latent semantic analysis.") op.add_option("--no-minibatch", action="store_false", dest="minibatch", default=True, help="Use ordinary k-means algorithm (in batch mode).") op.add_option("--no-idf", action="store_false", dest="use_idf", default=True, help="Disable Inverse Document Frequency feature weighting.") op.add_option("--use-hashing", action="store_true", default=False, help="Use a hashing feature vectorizer") op.add_option("--n-features", type=int, default=10000, help="Maximum number of features (dimensions)" " to extract from text.") op.add_option("--verbose", action="store_true", dest="verbose", default=False, help="Print progress reports inside k-means algorithm.") print(__doc__) op.print_help() (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) dataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=42) print("%d documents" % len(dataset.data)) print("%d categories" % len(dataset.target_names)) print() labels = dataset.target true_k = np.unique(labels).shape[0] print("Extracting features from the training dataset using a sparse vectorizer") t0 = time() if opts.use_hashing: if opts.use_idf: # Perform an IDF normalization on the output of HashingVectorizer hasher = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=True, norm=None, binary=False) vectorizer = make_pipeline(hasher, TfidfTransformer()) else: vectorizer = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=False, norm='l2', binary=False) else: vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features, min_df=2, stop_words='english', use_idf=opts.use_idf) X = vectorizer.fit_transform(dataset.data) print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X.shape) print() if opts.n_components: print("Performing dimensionality reduction using LSA") t0 = time() # Vectorizer results are normalized, which makes KMeans behave as # spherical k-means for better results. Since LSA/SVD results are # not normalized, we have to redo the normalization. 
svd = TruncatedSVD(opts.n_components) lsa = make_pipeline(svd, Normalizer(copy=False)) X = lsa.fit_transform(X) print("done in %fs" % (time() - t0)) explained_variance = svd.explained_variance_ratio_.sum() print("Explained variance of the SVD step: {}%".format( int(explained_variance * 100))) print() ############################################################################### # Do the actual clustering if opts.minibatch: km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=opts.verbose) else: km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=opts.verbose) print("Clustering sparse data with %s" % km) t0 = time() km.fit(X) print("done in %0.3fs" % (time() - t0)) print() print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, km.labels_, sample_size=1000)) print() if not (opts.n_components or opts.use_hashing): print("Top terms per cluster:") order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(true_k): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print()
bsd-3-clause
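A condensed, self-contained variant of the pipeline above may help see the moving parts in isolation (hedged sketch: the four-document corpus, the cluster count, and the parameter values are illustrative and stand in for the 20 newsgroups download):

# Minimal sketch of the TF-IDF + MiniBatchKMeans pipeline on a toy corpus.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import MiniBatchKMeans
from sklearn import metrics

docs = [
    "the telescope observed a distant galaxy",
    "the rocket launch was delayed by weather",
    "the goalkeeper saved the penalty kick",
    "the striker scored twice in the final",
]
labels = [0, 0, 1, 1]  # space vs. sport, used only for evaluation

vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(docs)

km = MiniBatchKMeans(n_clusters=2, random_state=42, n_init=10)
km.fit(X)

print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))

order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names_out()  # get_feature_names() in older releases
for i in range(2):
    top = [terms[ind] for ind in order_centroids[i, :3]]
    print("Cluster %d: %s" % (i, ", ".join(top)))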
Fireblend/scikit-learn
examples/cluster/plot_mean_shift.py
348
1793
""" ============================================= A demo of the mean-shift clustering algorithm ============================================= Reference: Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward feature space analysis". IEEE Transactions on Pattern Analysis and Machine Intelligence. 2002. pp. 603-619. """ print(__doc__) import numpy as np from sklearn.cluster import MeanShift, estimate_bandwidth from sklearn.datasets.samples_generator import make_blobs ############################################################################### # Generate sample data centers = [[1, 1], [-1, -1], [1, -1]] X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6) ############################################################################### # Compute clustering with MeanShift # The following bandwidth can be automatically detected using bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(X) labels = ms.labels_ cluster_centers = ms.cluster_centers_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) print("number of estimated clusters : %d" % n_clusters_) ############################################################################### # Plot result import matplotlib.pyplot as plt from itertools import cycle plt.figure(1) plt.clf() colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk') for k, col in zip(range(n_clusters_), colors): my_members = labels == k cluster_center = cluster_centers[k] plt.plot(X[my_members, 0], X[my_members, 1], col + '.') plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show()
bsd-3-clause
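The number of clusters MeanShift finds is driven entirely by the bandwidth, which the example above estimates with a quantile of 0.2. A small sketch comparing a few quantile values (the synthetic blobs mirror the example; the quantile values are illustrative):

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs  # modern import path

centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=2000, centers=centers, cluster_std=0.6,
                  random_state=0)

for quantile in (0.1, 0.2, 0.5):
    bandwidth = estimate_bandwidth(X, quantile=quantile, n_samples=500)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
    n_clusters = len(np.unique(ms.labels_))
    print("quantile=%.1f -> bandwidth=%.2f, clusters=%d"
          % (quantile, bandwidth, n_clusters))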
Nehoroshiy/multi_classifier
recommender_test/recommend_test.py
1
3816
""" 2015-2016 Constantine Belev const.belev@ya.ru """ import fileinput import sys from collections import namedtuple import numpy as np import pandas as pd import scipy as sp from pandas.io.common import ZipFile from scipy import sparse from manopt.approximator import cg if sys.version_info[0] < 3: pass else: pass from io import BytesIO def get_movielens_data(local_file=None): if not local_file: print('Downloading data...') zip_file_url = 'http://files.grouplens.org/datasets/movielens/ml-10m.zip' zip_response = get(zip_file_url) zip_content = BytesIO(zip_response.content) fname = 'ml-10M100K/ratings.dat' with ZipFile(zip_content) as zfile: zfile.extract(fname) for line in fileinput.input([fname], inplace=True): print(line.replace('::', ';')) else: fname = local_file ml_data = pd.read_csv(fname, sep=';', header=None, engine='c', names=['userid', 'movieid', 'rating', 'timestamp'], usecols=['userid', 'movieid', 'rating']) # normalize indices to avoid gaps ml_data['movieid'] = ml_data.groupby('movieid', sort=False).grouper.group_info[0] ml_data['userid'] = ml_data.groupby('userid', sort=False).grouper.group_info[0] # build sparse user-movie matrix data_shape = ml_data[['userid', 'movieid']].max() + 1 data_matrix = sp.sparse.csr_matrix((ml_data['rating'], (ml_data['userid'], ml_data['movieid'])), shape=data_shape, dtype=np.float64) print('Done.') return data_matrix def split_data(data, test_ratio=0.2): '''Randomly splits data into training and testing datasets. Default ratio is 80%/20%. Returns datasets in namedtuple format for convenience. Usage: train_data, test_data = split_data(data_matrix) or movielens_data = split_data(data_matrix) and later in code: do smth with movielens_data.train do smth with movielens_data.test ''' num_users = data.shape[0] idx = np.zeros((num_users,), dtype=bool) sel = np.random.choice(num_users, int(test_ratio*num_users), replace=False) np.put(idx, sel, True) Movielens_data = namedtuple('MovieLens10M', ['train', 'test']) movielens_data = Movielens_data(train=data[~idx, :], test=data[idx, :]) return movielens_data if __name__ == "__main__": local_file_name = 'ml-10M100K/ratings.dat' data_matrix = get_movielens_data(local_file_name) train_matrix, test_matrix = split_data(data_matrix, test_ratio=0.995) print('Matrix build of shape {} step done. Start sorting sigma_set'.format(train_matrix.shape)) # sorted sigma set sigma_set = train_matrix.nonzero() sigma_set[0][:] = sigma_set[0][sigma_set[1].argsort()] sigma_set[1][:] = sigma_set[1][sigma_set[1].argsort()] sigma_set[1][:] = sigma_set[1][sigma_set[0].argsort()] sigma_set[0][:] = sigma_set[0][sigma_set[0].argsort()] print('sigma_set sorting done. Start iterations') x = None maxiter_ordinary = 10 r = 6 for rank in range(1, r): current_maxiter = 1 #np.log(rank) + 1 x, it, err = cg(train_matrix, sigma_set, rank, x0=x, maxiter=int(maxiter_ordinary * current_maxiter)) #if rank in [3, 4, 5]: # cProfile.run('cg(train_matrix, sigma_set, rank, x0=x, maxiter=maxiter_ordinary * current_maxiter)') print('Iterations for rank {} done with err {}'.format(rank, err[-1])) print('Current sigmas: {}'.format(x.s)) if it != int(maxiter_ordinary * current_maxiter): r = rank break print('rank is {}'.format(r)) print(x.s) x, it, err = cg(train_matrix, sigma_set, r=6, x0=x, maxiter=200)
mit
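The loading step above maps raw user and movie ids onto a gap-free index range and packs the ratings into a CSR matrix. A small self-contained sketch of the same idea (using pandas.factorize instead of the internal grouper attribute, and a toy ratings table standing in for the MovieLens download):

import numpy as np
import pandas as pd
from scipy import sparse

ratings = pd.DataFrame({
    'userid':  [10, 10, 42, 42, 7],
    'movieid': [1001, 1003, 1001, 1002, 1003],
    'rating':  [4.0, 3.5, 5.0, 2.0, 4.5],
})

# factorize() maps arbitrary ids onto 0..n-1 without gaps
user_idx, _ = pd.factorize(ratings['userid'], sort=False)
movie_idx, _ = pd.factorize(ratings['movieid'], sort=False)

shape = (user_idx.max() + 1, movie_idx.max() + 1)
data_matrix = sparse.csr_matrix(
    (ratings['rating'].values, (user_idx, movie_idx)),
    shape=shape, dtype=np.float64)

print(data_matrix.shape, data_matrix.nnz)  # (3, 3) 5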
DistrictDataLabs/yellowbrick
yellowbrick/datasets/signature.py
1
1179
# yellowbrick.datasets.signature # Performs SHA 256 hashing of a file for dataset archive verification. # # Author: Benjamin Bengfort # Created: Tue Jul 31 14:18:11 2018 -0400 # # Copyright (C) 2018 The scikit-yb developers # For license information, see LICENSE.txt # # ID: signature.py [7082742] benjamin@bengfort.com $ """ Performs SHA 256 hashing of a file for dataset archive verification. """ ########################################################################## ## Imports ########################################################################## import hashlib ########################################################################## ## Signature checking utility ########################################################################## def sha256sum(path, blocksize=65536): """ Computes the SHA256 signature of a file to verify that the file has not been modified in transit and that it is the correct version of the data. """ sig = hashlib.sha256() with open(path, "rb") as f: buf = f.read(blocksize) while len(buf) > 0: sig.update(buf) buf = f.read(blocksize) return sig.hexdigest()
apache-2.0
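A short usage sketch for the helper above, checking a downloaded file against an expected digest (the file contents and expected value are made up for illustration; hashlib and hmac.compare_digest are standard library):

import hashlib
import hmac
import tempfile


def sha256sum(path, blocksize=65536):
    # Same streaming pattern as the helper above: hash in fixed-size blocks.
    sig = hashlib.sha256()
    with open(path, "rb") as f:
        buf = f.read(blocksize)
        while len(buf) > 0:
            sig.update(buf)
            buf = f.read(blocksize)
    return sig.hexdigest()


with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"toy dataset archive")
    path = tmp.name

expected = hashlib.sha256(b"toy dataset archive").hexdigest()
actual = sha256sum(path)
# compare_digest avoids timing side channels when comparing signatures
print("verified:", hmac.compare_digest(actual, expected))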
woobe/h2o
py/testdir_multi_jvm/test_rf_libsvm_fvec.py
1
3501
import unittest import random, sys, time, os sys.path.extend(['.','..','py']) import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED, localhost SEED = h2o.setup_random_seed() localhost = h2o.decide_if_localhost() if (localhost): h2o.build_cloud(3,java_heap_GB=4) else: h2o_hosts.build_cloud_with_hosts() # uses import Hdfs for s3n instead of import folder @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_rf_libsvm_fvec(self): h2o.beta_features = True # just do the import folder once # make the timeout variable per dataset. it can be 10 secs for covtype 20x (col key creation) # so probably 10x that for covtype200 csvFilenameList = [ ("mnist_train.svm", "cM", 30, 1), # FIX! fails KMeansScore # not integer output # ("colon-cancer.svm", "cA", 30, 1), ("connect4.svm", "cB", 30, 1), ("syn_6_1000_10.svm", "cK", 30, 1), # float response requires regression # ("syn_0_100_1000.svm", "cL", 30, 1), ("mushrooms.svm", "cG", 30, 1), # rf doesn't like reals # ("duke.svm", "cD", 30, 1), # too many features? 150K inspect timeout? # ("E2006.train.svm", "cE", 30, 1), ("gisette_scale.svm", "cF", 30, 1), # too big for rf (memory error) # ("news20.svm", "cH", 30, 1), # multiclass format ..don't support # ("tmc2007_train.svm", "cJ", 30, 1), ("covtype.binary.svm", "cC", 30, 1), # normal csv ] ### csvFilenameList = random.sample(csvFilenameAll,1) # h2b.browseTheCloud() lenNodes = len(h2o.nodes) firstDone = False for (csvFilename, hex_key, timeoutSecs, resultMult) in csvFilenameList: # have to import each time, because h2o deletes source after parse bucket = "home-0xdiag-datasets" csvPathname = "libsvm/" + csvFilename # PARSE****************************************** parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=2000) print "Parse result['destination_key']:", parseResult['destination_key'] # INSPECT****************************************** start = time.time() inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360) print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds" h2o_cmd.infoFromInspect(inspect, csvFilename) # RF****************************************** kwargs = { 'ntrees': 1, 'response': 0, 'classification': 0, } timeoutSecs = 600 start = time.time() rf = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs) elapsed = time.time() - start print "rf end on ", csvPathname, 'took', elapsed, 'seconds.', \ "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100) if __name__ == '__main__': h2o.unit_main()
apache-2.0
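The files parsed in the test above are in the libsvm/svmlight text format (a label followed by index:value pairs). Outside of H2O, one quick way to read such a file is scikit-learn's load_svmlight_file; a tiny sketch with a throwaway file (the two rows below are made up):

import tempfile
from sklearn.datasets import load_svmlight_file

# Two rows in svmlight format: "<label> <index>:<value> ..."
svm_text = b"1 1:0.5 3:1.2\n0 2:0.7 3:0.1\n"
with tempfile.NamedTemporaryFile(suffix=".svm", delete=False) as tmp:
    tmp.write(svm_text)
    path = tmp.name

# zero_based=False: indices in the file start at 1, as in classic libsvm data
X, y = load_svmlight_file(path, zero_based=False)
print(X.shape, y)  # (2, 3) [1. 0.]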
siavooshpayandehazad/SoCDep2
src/main/python/ConfigAndPackages/PackageFile.py
2
2984
# Copyright (C) 2015 Siavoosh Payandeh Azad ################################################ # Internal Variables ################################################ LoGDirectory = "LOGS" ################################################ # Turn Model Sets ################################################ FULL_TurnModel_2D = ['E2N', 'E2S', 'W2N', 'W2S', 'S2W', 'S2E', 'N2W', 'N2E'] FULL_TurnModel_3D = ['E2N', 'E2S', 'W2N', 'W2S', 'S2W', 'S2E', 'N2W', 'N2E', 'N2U', 'N2D', 'S2U', 'S2D', 'W2U', 'W2D', 'E2U', 'E2D', 'U2N', 'U2S', 'U2E', 'U2W', 'D2N', 'D2S', 'D2E', 'D2W'] # 2D Turn Models: XY_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S'] YX_TurnModel = ['S2W', 'S2E', 'N2W', 'N2E'] EastFirst_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S', 'S2W', 'N2W'] WestFirst_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S', 'S2E', 'N2E'] NorthLast_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S', 'N2W', 'N2E'] NorthFirst_TurnModel = ['E2S', 'W2S', 'S2W', 'S2E', 'N2W', 'N2E'] SouthFirst_TurnModel = ['E2N', 'W2N', 'S2W', 'S2E', 'N2W', 'N2E'] NegativeFirst2D_TurnModel = ['E2N', 'E2S', 'W2N', 'S2E', 'W2S', 'N2E'] # 3D Turn Models: XYZ_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S', 'S2U', 'S2D', 'N2U', 'N2D', 'W2U', 'W2D', 'E2U', 'E2D'] # based on description in "Turn model based router design for 3D network on chip" by Chemli and Zitouni NegativeFirst3D_TurnModel = ['E2N', 'E2S', 'W2N', 'W2S', 'S2E', 'N2E', 'N2U', 'N2D', 'S2U', 'S2D', 'E2U', 'E2D', 'W2U', 'W2D', 'U2N', 'U2S', 'U2E', 'D2S', 'D2N', 'D2E'] routing_alg_list_2d = [YX_TurnModel, XY_TurnModel, WestFirst_TurnModel, NorthLast_TurnModel, NegativeFirst2D_TurnModel, EastFirst_TurnModel, SouthFirst_TurnModel, NorthFirst_TurnModel] routing_alg_list_3d = [XYZ_TurnModel] # , NegativeFirst3D_TurnModel] # TODO: test this for deadlock freeness! ################################################ # SHM Sets ################################################ TurnsHealth_2DNetwork = {"N2W": True, "N2E": True, "S2W": True, "S2E": True, "W2N": True, "W2S": True, "E2N": True, "E2S": True} # TurnsHealth for conventional 3D NoC TurnsHealth_3DNetwork = {"N2W": True, "N2E": True, "S2W": True, "S2E": True, "W2N": True, "W2S": True, "E2N": True, "E2S": True, "N2U": True, "N2D": True, "S2U": True, "S2D": True, "W2U": True, "W2D": True, "E2U": True, "E2D": True, "U2W": True, "U2E": True, "U2N": True, "U2S": True, "D2W": True, "D2E": True, "D2N": True, "D2S": True} ################################################ # System Package info ################################################ ImportModules = ['tkinter', 'networkx', 'matplotlib', 'scipy', 'PIL', 'pympler', 'simpy', 'collections', 'sklearn'] #,'image']
gpl-2.0
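Since each routing algorithm above is just a set of allowed turns, a small sanity-check sketch can verify that a turn model only uses turns from the full 2D set and that all of its turns are currently healthy (the helper name is made up; the constants are copied from the package file above):

FULL_TURNMODEL_2D = ['E2N', 'E2S', 'W2N', 'W2S', 'S2W', 'S2E', 'N2W', 'N2E']
WEST_FIRST = ['E2N', 'E2S', 'W2N', 'W2S', 'S2E', 'N2E']
TURNS_HEALTH_2D = {turn: True for turn in FULL_TURNMODEL_2D}


def turn_model_is_usable(turn_model, turns_health):
    """True if every turn is a known 2D turn and is marked healthy."""
    known = set(FULL_TURNMODEL_2D)
    return all(turn in known and turns_health.get(turn, False)
               for turn in turn_model)


print(turn_model_is_usable(WEST_FIRST, TURNS_HEALTH_2D))  # True
TURNS_HEALTH_2D['S2E'] = False                            # inject a fault
print(turn_model_is_usable(WEST_FIRST, TURNS_HEALTH_2D))  # False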
QianruZhou333/ASleep
my_importData_0_small.py
2
1600
import numpy as np import pandas as pd input_file = "0_floor.csv" # comma delimited is the default df = pd.read_csv(input_file, header = 0) # for space delimited use: # df = pd.read_csv(input_file, header = 0, delimiter = " ") # for tab delimited use: # df = pd.read_csv(input_file, header = 0, delimiter = "\t") # put the original column names in a python list original_headers = list(df.columns.values) # remove the non-numeric columns df = df._get_numeric_data() # put the numeric column names in a python list numeric_headers = list(df.columns.values) # create a numpy array with the numeric values for input into scikit-learn numpy_array = df.as_matrix() # reverse the order of the columns #numeric_headers.reverse() #reverse_df = df[numeric_headers] # throughput random forest regression t = numpy_array[0:168, 3] x = np.linspace(0, 167, 168) xall = np.linspace(0, 189, 190) xtest = np.linspace(168, 189, 22) from sklearn.ensemble import RandomForestRegressor #tfit = RandomForestRegressor(100).fit(x[:, None], t).predict(x[:, None]) tfit = RandomForestRegressor(100).fit(numpy_array[0:168, 0:2 ], t).predict(numpy_array[168:190, 0:2]) import matplotlib.pyplot as plt fig, ax = plt.subplots() #ax.errorbar(x, t, 0.3, fmt='*', label="Training traffic") ax.plot(xtest, tfit, '-r', label="Predicted traffic") ax.errorbar(xtest, numpy_array[168:190, 3], fmt='og', label="Test traffic") #ax.set_ylabel('Throughput (kbits/second)') #ax.set_xlabel('Time in hours') #ax.set_title('Taffic Prediction with Random Forest Regression on ground floor') #ax.legend(loc="upper left") plt.show()
apache-2.0
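The script above fits the forest on the first 168 hourly rows and predicts the remaining 22. A compact sketch of the same train-then-evaluate pattern (the sine-plus-noise series, the hour-of-day feature, and the split sizes are illustrative stand-ins for the CSV):

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

rng = np.random.RandomState(0)
hours = np.arange(190)
traffic = 50 + 20 * np.sin(2 * np.pi * hours / 24) + rng.normal(0, 3, 190)

X = (hours % 24).reshape(-1, 1)        # hour-of-day as the only feature
X_train, X_test = X[:168], X[168:]
y_train, y_test = traffic[:168], traffic[168:]

model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
pred = model.predict(X_test)
print("MAE on the last 22 hours: %.2f" % mean_absolute_error(y_test, pred))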
Akshay0724/scikit-learn
examples/plot_multioutput_face_completion.py
73
2986
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] # Upper half of the faces X_train = train[:, :(n_pixels + 1) // 2] # Lower half of the faces y_train = train[:, n_pixels // 2:] X_test = test[:, :(n_pixels + 1) // 2] y_test = test[:, n_pixels // 2:] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
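The key point of the example above is that each regressor is fitted with a two-dimensional target matrix, so a single model predicts every missing pixel at once. A minimal multi-output sketch on random data (the shapes are illustrative):

import numpy as np
from sklearn.ensemble import ExtraTreesRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 32)          # e.g. upper-half pixels
Y = rng.rand(200, 32)          # e.g. lower-half pixels (multi-output target)

est = ExtraTreesRegressor(n_estimators=10, random_state=0)
est.fit(X, Y)                  # a 2-D y enables multi-output regression
print(est.predict(X[:3]).shape)  # (3, 32): all outputs predicted jointly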
jmargeta/scikit-learn
examples/applications/wikipedia_principal_eigenvector.py
4
7744
""" =============================== Wikipedia principal eigenvector =============================== A classical way to assert the relative importance of vertices in a graph is to compute the principal eigenvector of the adjacency matrix so as to assign to each vertex the values of the components of the first eigenvector as a centrality score: http://en.wikipedia.org/wiki/Eigenvector_centrality On the graph of webpages and links those values are called the PageRank scores by Google. The goal of this example is to analyze the graph of links inside wikipedia articles to rank articles by relative importance according to this eigenvector centrality. The traditional way to compute the principal eigenvector is to use the power iteration method: http://en.wikipedia.org/wiki/Power_iteration Here the computation is achieved thanks to Martinsson's Randomized SVD algorithm implemented in the scikit. The graph data is fetched from the DBpedia dumps. DBpedia is an extraction of the latent structured data of the Wikipedia content. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD from __future__ import print_function from bz2 import BZ2File import os from datetime import datetime from pprint import pprint from time import time import numpy as np from scipy import sparse from sklearn.utils.extmath import randomized_svd from sklearn.externals.joblib import Memory print(__doc__) ############################################################################### # Where to download the data, if not already on disk redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2" redirects_filename = redirects_url.rsplit("/", 1)[1] page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2" page_links_filename = page_links_url.rsplit("/", 1)[1] resources = [ (redirects_url, redirects_filename), (page_links_url, page_links_filename), ] for url, filename in resources: if not os.path.exists(filename): import urllib print("Downloading data from '%s', please wait..." 
% url) opener = urllib.urlopen(url) open(filename, 'wb').write(opener.read()) print() ############################################################################### # Loading the redirect files memory = Memory(cachedir=".") def index(redirects, index_map, k): """Find the index of an article name after redirect resolution""" k = redirects.get(k, k) return index_map.setdefault(k, len(index_map)) DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/") SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1) def short_name(nt_uri): """Remove the < and > URI markers and the common URI prefix""" return nt_uri[SHORTNAME_SLICE] def get_redirects(redirects_filename): """Parse the redirections and build a transitively closed map out of it""" redirects = {} print("Parsing the NT redirect file") for l, line in enumerate(BZ2File(redirects_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue redirects[short_name(split[0])] = short_name(split[2]) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) # compute the transitive closure print("Computing the transitive closure of the redirect relation") for l, source in enumerate(redirects.keys()): transitive_target = None target = redirects[source] seen = set([source]) while True: transitive_target = target target = redirects.get(target) if target is None or target in seen: break seen.add(target) redirects[source] = transitive_target if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) return redirects # disabling joblib as the pickling of large dicts seems much too slow #@memory.cache def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None): """Extract the adjacency graph as a scipy sparse matrix Redirects are resolved first. Returns X, the scipy sparse adjacency matrix, redirects as python dict from article names to article names and index_map a python dict from article names to python int (article indexes). 
""" print("Computing the redirect map") redirects = get_redirects(redirects_filename) print("Computing the integer index map") index_map = dict() links = list() for l, line in enumerate(BZ2File(page_links_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue i = index(redirects, index_map, short_name(split[0])) j = index(redirects, index_map, short_name(split[2])) links.append((i, j)) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) if limit is not None and l >= limit - 1: break print("Computing the adjacency matrix") X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32) for i, j in links: X[i, j] = 1.0 del links print("Converting to CSR representation") X = X.tocsr() print("CSR conversion done") return X, redirects, index_map # stop after 5M links to make it possible to work in RAM X, redirects, index_map = get_adjacency_matrix( redirects_filename, page_links_filename, limit=5000000) names = dict((i, name) for name, i in index_map.iteritems()) print("Computing the principal singular vectors using randomized_svd") t0 = time() U, s, V = randomized_svd(X, 5, n_iter=3) print("done in %0.3fs" % (time() - t0)) # print the names of the wikipedia related strongest compenents of the the # principal singular vector which should be similar to the highest eigenvector print("Top wikipedia pages according to principal singular vectors") pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]]) pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]]) def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10): """Power iteration computation of the principal eigenvector This method is also known as Google PageRank and the implementation is based on the one from the NetworkX project (BSD licensed too) with copyrights by: Aric Hagberg <hagberg@lanl.gov> Dan Schult <dschult@colgate.edu> Pieter Swart <swart@lanl.gov> """ n = X.shape[0] X = X.copy() incoming_counts = np.asarray(X.sum(axis=1)).ravel() print("Normalizing the graph") for i in incoming_counts.nonzero()[0]: X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i] dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel() scores = np.ones(n, dtype=np.float32) / n # initial guess for i in range(max_iter): print("power iteration #%d" % i) prev_scores = scores scores = (alpha * (scores * X + np.dot(dangle, prev_scores)) + (1 - alpha) * prev_scores.sum() / n) # check convergence: normalized l_inf norm scores_max = np.abs(scores).max() if scores_max == 0.0: scores_max = 1.0 err = np.abs(scores - prev_scores).max() / scores_max print("error: %0.6f" % err) if err < n * tol: return scores return scores print("Computing principal eigenvector score using a power iteration method") t0 = time() scores = centrality_scores(X, max_iter=100, tol=1e-10) print("done in %0.3fs" % (time() - t0)) pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
bsd-3-clause
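The centrality_scores function above is a sparse power iteration; the same mechanics fit in a few lines on a toy graph (the 4-node adjacency matrix below is made up, and dangling-node handling is avoided by construction rather than handled explicitly):

import numpy as np
from scipy import sparse

# Toy directed graph: entry (i, j) = 1 means page i links to page j.
A = sparse.csr_matrix(np.array([[0, 1, 1, 0],
                                [0, 0, 1, 0],
                                [1, 0, 0, 1],
                                [0, 0, 1, 0]], dtype=np.float64))

# Row-normalize so every page spreads its score evenly over its out-links
# (every node here has at least one out-link, so no dangling nodes).
out_degree = np.asarray(A.sum(axis=1)).ravel()
P = sparse.diags(1.0 / out_degree).dot(A)
PT = P.T.tocsr()

alpha, n = 0.85, A.shape[0]
scores = np.full(n, 1.0 / n)
for _ in range(100):
    prev = scores
    scores = alpha * PT.dot(prev) + (1.0 - alpha) / n
    if np.abs(scores - prev).max() < 1e-10:
        break

print(np.round(scores, 3))  # page 2, linked to by all other pages, should score highest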
Akshay0724/scikit-learn
sklearn/tests/test_pipeline.py
13
31148
""" Test the pipeline module. """ from tempfile import mkdtemp import shutil import time import numpy as np from scipy import sparse from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_dict_equal from sklearn.base import clone, BaseEstimator from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.cluster import KMeans from sklearn.feature_selection import SelectKBest, f_classif from sklearn.decomposition import PCA, TruncatedSVD from sklearn.datasets import load_iris from sklearn.preprocessing import StandardScaler from sklearn.feature_extraction.text import CountVectorizer from sklearn.externals.joblib import Memory JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) class NoFit(object): """Small class to test parameter dispatching. """ def __init__(self, a=None, b=None): self.a = a self.b = b class NoTrans(NoFit): def fit(self, X, y): return self def get_params(self, deep=False): return {'a': self.a, 'b': self.b} def set_params(self, **params): self.a = params['a'] return self class NoInvTransf(NoTrans): def transform(self, X, y=None): return X class Transf(NoInvTransf): def transform(self, X, y=None): return X def inverse_transform(self, X): return X class TransfFitParams(Transf): def fit(self, X, y, **fit_params): self.fit_params = fit_params return self class Mult(BaseEstimator): def __init__(self, mult=1): self.mult = mult def fit(self, X, y): return self def transform(self, X): return np.asarray(X) * self.mult def inverse_transform(self, X): return np.asarray(X) / self.mult def predict(self, X): return (np.asarray(X) * self.mult).sum(axis=1) predict_proba = predict_log_proba = decision_function = predict def score(self, X, y=None): return np.sum(X) class FitParamT(BaseEstimator): """Mock classifier """ def __init__(self): self.successful = False def fit(self, X, y, should_succeed=False): self.successful = should_succeed def predict(self, X): return self.successful def fit_predict(self, X, y, should_succeed=False): self.fit(X, y, should_succeed=should_succeed) return self.predict(X) def score(self, X, y=None, sample_weight=None): if sample_weight is not None: X = X * sample_weight return np.sum(X) class DummyTransf(Transf): """Transformer which store the column means""" def fit(self, X, y): self.means_ = np.mean(X, axis=0) # store timestamp to figure out whether the result of 'fit' has been # cached or not self.timestamp_ = time.time() return self def test_pipeline_init(): # Test the various init parameters of the pipeline. assert_raises(TypeError, Pipeline) # Check that we can't instantiate pipelines with objects without fit # method assert_raises_regex(TypeError, 'Last step of Pipeline should implement fit. 
' '.*NoFit.*', Pipeline, [('clf', NoFit())]) # Smoke test with only an estimator clf = NoTrans() pipe = Pipeline([('svc', clf)]) assert_equal(pipe.get_params(deep=True), dict(svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False))) # Check that params are set pipe.set_params(svc__a=0.1) assert_equal(clf.a, 0.1) assert_equal(clf.b, None) # Smoke test the repr: repr(pipe) # Test with two objects clf = SVC() filter1 = SelectKBest(f_classif) pipe = Pipeline([('anova', filter1), ('svc', clf)]) # Check that we can't instantiate with non-transformers on the way # Note that NoTrans implements fit, but not transform assert_raises_regex(TypeError, 'All intermediate steps should be transformers' '.*\\bNoTrans\\b.*', Pipeline, [('t', NoTrans()), ('svc', clf)]) # Check that params are set pipe.set_params(svc__C=0.1) assert_equal(clf.C, 0.1) # Smoke test the repr: repr(pipe) # Check that params are not set when naming them wrong assert_raises(ValueError, pipe.set_params, anova__C=0.1) # Test clone pipe2 = clone(pipe) assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc']) # Check that apart from estimators, the parameters are the same params = pipe.get_params(deep=True) params2 = pipe2.get_params(deep=True) for x in pipe.get_params(deep=False): params.pop(x) for x in pipe2.get_params(deep=False): params2.pop(x) # Remove estimators that where copied params.pop('svc') params.pop('anova') params2.pop('svc') params2.pop('anova') assert_equal(params, params2) def test_pipeline_methods_anova(): # Test the various methods of the pipeline (anova). iris = load_iris() X = iris.data y = iris.target # Test with Anova + LogisticRegression clf = LogisticRegression() filter1 = SelectKBest(f_classif, k=2) pipe = Pipeline([('anova', filter1), ('logistic', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_fit_params(): # Test that the pipeline can take fit parameters pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())]) pipe.fit(X=None, y=None, clf__should_succeed=True) # classifier should return True assert_true(pipe.predict(None)) # and transformer params should not be changed assert_true(pipe.named_steps['transf'].a is None) assert_true(pipe.named_steps['transf'].b is None) # invalid parameters should raise an error message assert_raise_message( TypeError, "fit() got an unexpected keyword argument 'bad'", pipe.fit, None, None, clf__bad=True ) def test_pipeline_sample_weight_supported(): # Pipeline should pass sample_weight X = np.array([[1, 2]]) pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())]) pipe.fit(X, y=None) assert_equal(pipe.score(X), 3) assert_equal(pipe.score(X, y=None), 3) assert_equal(pipe.score(X, y=None, sample_weight=None), 3) assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8) def test_pipeline_sample_weight_unsupported(): # When sample_weight is None it shouldn't be passed X = np.array([[1, 2]]) pipe = Pipeline([('transf', Transf()), ('clf', Mult())]) pipe.fit(X, y=None) assert_equal(pipe.score(X), 3) assert_equal(pipe.score(X, sample_weight=None), 3) assert_raise_message( TypeError, "score() got an unexpected keyword argument 'sample_weight'", pipe.score, X, sample_weight=np.array([2, 3]) ) def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([('cls', LinearRegression())]) # expected error message error_msg = ('Invalid parameter %s for estimator %s. 
' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.') assert_raise_message(ValueError, error_msg % ('fake', 'Pipeline'), pipe.set_params, fake='nope') # nested model check assert_raise_message(ValueError, error_msg % ("fake", pipe), pipe.set_params, fake__estimator='nope') def test_pipeline_methods_pca_svm(): # Test the various methods of the pipeline (pca + svm). iris = load_iris() X = iris.data y = iris.target # Test with PCA + SVC clf = SVC(probability=True, random_state=0) pca = PCA(svd_solver='full', n_components='mle', whiten=True) pipe = Pipeline([('pca', pca), ('svc', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_methods_preprocessing_svm(): # Test the various methods of the pipeline (preprocessing + svm). iris = load_iris() X = iris.data y = iris.target n_samples = X.shape[0] n_classes = len(np.unique(y)) scaler = StandardScaler() pca = PCA(n_components=2, svd_solver='randomized', whiten=True) clf = SVC(probability=True, random_state=0, decision_function_shape='ovr') for preprocessing in [scaler, pca]: pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)]) pipe.fit(X, y) # check shapes of various prediction functions predict = pipe.predict(X) assert_equal(predict.shape, (n_samples,)) proba = pipe.predict_proba(X) assert_equal(proba.shape, (n_samples, n_classes)) log_proba = pipe.predict_log_proba(X) assert_equal(log_proba.shape, (n_samples, n_classes)) decision_function = pipe.decision_function(X) assert_equal(decision_function.shape, (n_samples, n_classes)) pipe.score(X, y) def test_fit_predict_on_pipeline(): # test that the fit_predict method is implemented on a pipeline # test that the fit_predict on pipeline yields same results as applying # transform and clustering steps separately iris = load_iris() scaler = StandardScaler() km = KMeans(random_state=0) # As pipeline doesn't clone estimators on construction, # it must have its own estimators scaler_for_pipeline = StandardScaler() km_for_pipeline = KMeans(random_state=0) # first compute the transform and clustering step separately scaled = scaler.fit_transform(iris.data) separate_pred = km.fit_predict(scaled) # use a pipeline to do the transform and clustering in one step pipe = Pipeline([ ('scaler', scaler_for_pipeline), ('Kmeans', km_for_pipeline) ]) pipeline_pred = pipe.fit_predict(iris.data) assert_array_almost_equal(pipeline_pred, separate_pred) def test_fit_predict_on_pipeline_without_fit_predict(): # tests that a pipeline does not have fit_predict method when final # step of pipeline does not have fit_predict defined scaler = StandardScaler() pca = PCA(svd_solver='full') pipe = Pipeline([('scaler', scaler), ('pca', pca)]) assert_raises_regex(AttributeError, "'PCA' object has no attribute 'fit_predict'", getattr, pipe, 'fit_predict') def test_fit_predict_with_intermediate_fit_params(): # tests that Pipeline passes fit_params to intermediate steps # when fit_predict is invoked pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())]) pipe.fit_predict(X=None, y=None, transf__should_get_this=True, clf__should_succeed=True) assert_true(pipe.named_steps['transf'].fit_params['should_get_this']) assert_true(pipe.named_steps['clf'].successful) assert_false('should_succeed' in pipe.named_steps['transf'].fit_params) def test_feature_union(): # basic sanity check for feature union iris = load_iris() X = iris.data X -= X.mean(axis=0) y = iris.target svd = TruncatedSVD(n_components=2, random_state=0) select = 
SelectKBest(k=1) fs = FeatureUnion([("svd", svd), ("select", select)]) fs.fit(X, y) X_transformed = fs.transform(X) assert_equal(X_transformed.shape, (X.shape[0], 3)) # check if it does the expected thing assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) # test if it also works for sparse input # We use a different svd object to control the random_state stream fs = FeatureUnion([("svd", svd), ("select", select)]) X_sp = sparse.csr_matrix(X) X_sp_transformed = fs.fit_transform(X_sp, y) assert_array_almost_equal(X_transformed, X_sp_transformed.toarray()) # test setting parameters fs.set_params(select__k=2) assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4)) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)]) X_transformed = fs.fit_transform(X, y) assert_equal(X_transformed.shape, (X.shape[0], 8)) # test error if some elements do not support transform assert_raises_regex(TypeError, 'All estimators should implement fit and ' 'transform.*\\bNoTrans\\b', FeatureUnion, [("transform", Transf()), ("no_transform", NoTrans())]) def test_make_union(): pca = PCA(svd_solver='full') mock = Transf() fu = make_union(pca, mock) names, transformers = zip(*fu.transformer_list) assert_equal(names, ("pca", "transf")) assert_equal(transformers, (pca, mock)) def test_make_union_kwargs(): pca = PCA(svd_solver='full') mock = Transf() fu = make_union(pca, mock, n_jobs=3) assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list) assert_equal(3, fu.n_jobs) # invalid keyword parameters should raise an error message assert_raise_message( TypeError, 'Unknown keyword arguments: "transformer_weights"', make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1} ) def test_pipeline_transform(): # Test whether pipeline works with a transformer at the end. 
# Also test pipeline.transform and pipeline.inverse_transform iris = load_iris() X = iris.data pca = PCA(n_components=2, svd_solver='full') pipeline = Pipeline([('pca', pca)]) # test transform and fit_transform: X_trans = pipeline.fit(X).transform(X) X_trans2 = pipeline.fit_transform(X) X_trans3 = pca.fit_transform(X) assert_array_almost_equal(X_trans, X_trans2) assert_array_almost_equal(X_trans, X_trans3) X_back = pipeline.inverse_transform(X_trans) X_back2 = pca.inverse_transform(X_trans) assert_array_almost_equal(X_back, X_back2) def test_pipeline_fit_transform(): # Test whether pipeline works with a transformer missing fit_transform iris = load_iris() X = iris.data y = iris.target transf = Transf() pipeline = Pipeline([('mock', transf)]) # test fit_transform: X_trans = pipeline.fit_transform(X, y) X_trans2 = transf.fit(X, y).transform(X) assert_array_almost_equal(X_trans, X_trans2) def test_set_pipeline_steps(): transf1 = Transf() transf2 = Transf() pipeline = Pipeline([('mock', transf1)]) assert_true(pipeline.named_steps['mock'] is transf1) # Directly setting attr pipeline.steps = [('mock2', transf2)] assert_true('mock' not in pipeline.named_steps) assert_true(pipeline.named_steps['mock2'] is transf2) assert_equal([('mock2', transf2)], pipeline.steps) # Using set_params pipeline.set_params(steps=[('mock', transf1)]) assert_equal([('mock', transf1)], pipeline.steps) # Using set_params to replace single step pipeline.set_params(mock=transf2) assert_equal([('mock', transf2)], pipeline.steps) # With invalid data pipeline.set_params(steps=[('junk', ())]) assert_raises(TypeError, pipeline.fit, [[1]], [1]) assert_raises(TypeError, pipeline.fit_transform, [[1]], [1]) def test_set_pipeline_step_none(): # Test setting Pipeline steps to None X = np.array([[1]]) y = np.array([1]) mult2 = Mult(mult=2) mult3 = Mult(mult=3) mult5 = Mult(mult=5) def make(): return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)]) pipeline = make() exp = 2 * 3 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline.set_params(m3=None) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) assert_dict_equal(pipeline.get_params(deep=True), {'steps': pipeline.steps, 'm2': mult2, 'm3': None, 'last': mult5, 'memory': None, 'm2__mult': 2, 'last__mult': 5, }) pipeline.set_params(m2=None) exp = 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) # for other methods, ensure no AttributeErrors on None: other_methods = ['predict_proba', 'predict_log_proba', 'decision_function', 'transform', 'score'] for method in other_methods: getattr(pipeline, method)(X) pipeline.set_params(m2=mult2) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline = make() pipeline.set_params(last=None) # mult2 and mult3 are active exp = 6 assert_array_equal([[exp]], pipeline.fit(X, y).transform(X)) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) assert_raise_message(AttributeError, "'NoneType' object has no attribute 'predict'", getattr, pipeline, 'predict') # Check None 
step at construction time exp = 2 * 5 pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)]) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) def test_pipeline_ducktyping(): pipeline = make_pipeline(Mult(5)) pipeline.predict pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf()) assert_false(hasattr(pipeline, 'predict')) pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(None) assert_false(hasattr(pipeline, 'predict')) pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf(), NoInvTransf()) assert_false(hasattr(pipeline, 'predict')) pipeline.transform assert_false(hasattr(pipeline, 'inverse_transform')) pipeline = make_pipeline(NoInvTransf(), Transf()) assert_false(hasattr(pipeline, 'predict')) pipeline.transform assert_false(hasattr(pipeline, 'inverse_transform')) def test_make_pipeline(): t1 = Transf() t2 = Transf() pipe = make_pipeline(t1, t2) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transf-1") assert_equal(pipe.steps[1][0], "transf-2") pipe = make_pipeline(t1, t2, FitParamT()) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transf-1") assert_equal(pipe.steps[1][0], "transf-2") assert_equal(pipe.steps[2][0], "fitparamt") def test_feature_union_weights(): # test feature union with transformer weights iris = load_iris() X = iris.data y = iris.target pca = PCA(n_components=2, svd_solver='randomized', random_state=0) select = SelectKBest(k=1) # test using fit followed by transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) fs.fit(X, y) X_transformed = fs.transform(X) # test using fit_transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) X_fit_transformed = fs.fit_transform(X, y) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)], transformer_weights={"mock": 10}) X_fit_transformed_wo_method = fs.fit_transform(X, y) # check against expected result # We use a different pca object to control the random_state stream assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_array_almost_equal(X_fit_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_fit_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7)) def test_feature_union_parallel(): # test that n_jobs work for FeatureUnion X = JUNK_FOOD_DOCS fs = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ]) fs_parallel = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs_parallel2 = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs.fit(X) X_transformed = fs.transform(X) assert_equal(X_transformed.shape[0], len(X)) fs_parallel.fit(X) X_transformed_parallel = fs_parallel.transform(X) assert_equal(X_transformed.shape, X_transformed_parallel.shape) assert_array_equal( X_transformed.toarray(), X_transformed_parallel.toarray() ) # fit_transform should behave the same X_transformed_parallel2 = fs_parallel2.fit_transform(X) assert_array_equal( 
X_transformed.toarray(), X_transformed_parallel2.toarray() ) # transformers should stay fit after fit_transform X_transformed_parallel2 = fs_parallel2.transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) def test_feature_union_feature_names(): word_vect = CountVectorizer(analyzer="word") char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3)) ft = FeatureUnion([("chars", char_vect), ("words", word_vect)]) ft.fit(JUNK_FOOD_DOCS) feature_names = ft.get_feature_names() for feat in feature_names: assert_true("chars__" in feat or "words__" in feat) assert_equal(len(feature_names), 35) ft = FeatureUnion([("tr1", Transf())]).fit([[1]]) assert_raise_message(AttributeError, 'Transformer tr1 (type Transf) does not provide ' 'get_feature_names', ft.get_feature_names) def test_classes_property(): iris = load_iris() X = iris.data y = iris.target reg = make_pipeline(SelectKBest(k=1), LinearRegression()) reg.fit(X, y) assert_raises(AttributeError, getattr, reg, "classes_") clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0)) assert_raises(AttributeError, getattr, clf, "classes_") clf.fit(X, y) assert_array_equal(clf.classes_, np.unique(y)) def test_set_feature_union_steps(): mult2 = Mult(2) mult2.get_feature_names = lambda: ['x2'] mult3 = Mult(3) mult3.get_feature_names = lambda: ['x3'] mult5 = Mult(5) mult5.get_feature_names = lambda: ['x5'] ft = FeatureUnion([('m2', mult2), ('m3', mult3)]) assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]]))) assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names()) # Directly setting attr ft.transformer_list = [('m5', mult5)] assert_array_equal([[5]], ft.transform(np.asarray([[1]]))) assert_equal(['m5__x5'], ft.get_feature_names()) # Using set_params ft.set_params(transformer_list=[('mock', mult3)]) assert_array_equal([[3]], ft.transform(np.asarray([[1]]))) assert_equal(['mock__x3'], ft.get_feature_names()) # Using set_params to replace single step ft.set_params(mock=mult5) assert_array_equal([[5]], ft.transform(np.asarray([[1]]))) assert_equal(['mock__x5'], ft.get_feature_names()) def test_set_feature_union_step_none(): mult2 = Mult(2) mult2.get_feature_names = lambda: ['x2'] mult3 = Mult(3) mult3.get_feature_names = lambda: ['x3'] X = np.asarray([[1]]) ft = FeatureUnion([('m2', mult2), ('m3', mult3)]) assert_array_equal([[2, 3]], ft.fit(X).transform(X)) assert_array_equal([[2, 3]], ft.fit_transform(X)) assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names()) ft.set_params(m2=None) assert_array_equal([[3]], ft.fit(X).transform(X)) assert_array_equal([[3]], ft.fit_transform(X)) assert_equal(['m3__x3'], ft.get_feature_names()) ft.set_params(m3=None) assert_array_equal([[]], ft.fit(X).transform(X)) assert_array_equal([[]], ft.fit_transform(X)) assert_equal([], ft.get_feature_names()) # check we can change back ft.set_params(m3=mult3) assert_array_equal([[3]], ft.fit(X).transform(X)) def test_step_name_validation(): bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))] bad_steps2 = [('a', Mult(2)), ('a', Mult(3))] for cls, param in [(Pipeline, 'steps'), (FeatureUnion, 'transformer_list')]: # we validate in construction (despite scikit-learn convention) bad_steps3 = [('a', Mult(2)), (param, Mult(3))] for bad_steps, message in [ (bad_steps1, "Step names must not contain __: got ['a__q']"), (bad_steps2, "Names provided are not unique: ['a', 'a']"), (bad_steps3, "Step names conflict with constructor " "arguments: ['%s']" % param), ]: # three ways to make invalid: # - construction 
assert_raise_message(ValueError, message, cls, **{param: bad_steps}) # - setattr est = cls(**{param: [('a', Mult(1))]}) setattr(est, param, bad_steps) assert_raise_message(ValueError, message, est.fit, [[1]], [1]) assert_raise_message(ValueError, message, est.fit_transform, [[1]], [1]) # - set_params est = cls(**{param: [('a', Mult(1))]}) est.set_params(**{param: bad_steps}) assert_raise_message(ValueError, message, est.fit, [[1]], [1]) assert_raise_message(ValueError, message, est.fit_transform, [[1]], [1]) def test_pipeline_wrong_memory(): # Test that an error is raised when memory is not a string or a Memory # instance iris = load_iris() X = iris.data y = iris.target # Define memory as an integer memory = 1 cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())], memory=memory) assert_raises_regex(ValueError, "'memory' should either be a string or a" " joblib.Memory instance, got 'memory=1' instead.", cached_pipe.fit, X, y) def test_pipeline_memory(): iris = load_iris() X = iris.data y = iris.target cachedir = mkdtemp() try: memory = Memory(cachedir=cachedir, verbose=10) # Test with Transformer + SVC clf = SVC(probability=True, random_state=0) transf = DummyTransf() pipe = Pipeline([('transf', clone(transf)), ('svc', clf)]) cached_pipe = Pipeline([('transf', transf), ('svc', clf)], memory=memory) # Memoize the transformer at the first fit cached_pipe.fit(X, y) pipe.fit(X, y) # Get the time stamp of the tranformer in the cached pipeline ts = cached_pipe.named_steps['transf'].timestamp_ # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe.named_steps['transf'].means_) assert_false(hasattr(transf, 'means_')) # Check that we are reading the cache while fitting # a second time cached_pipe.fit(X, y) # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe.named_steps['transf'].means_) assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_) # Create a new pipeline with cloned estimators # Check that even changing the name step does not affect the cache hit clf_2 = SVC(probability=True, random_state=0) transf_2 = DummyTransf() cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)], memory=memory) cached_pipe_2.fit(X, y) # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe_2.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe_2.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe_2.named_steps['transf_2'].means_) assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_) finally: shutil.rmtree(cachedir)
bsd-3-clause
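Most of the behaviours exercised by the tests above come down to the step__parameter naming convention and the optional caching of fitted transformers. A short usage sketch with standard estimators (the cache directory is a throwaway temporary folder; the parameter values are illustrative):

from tempfile import mkdtemp
from shutil import rmtree

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

cachedir = mkdtemp()
pipe = Pipeline([('reduce', PCA(n_components=2)), ('clf', SVC())],
                memory=cachedir)          # cache fitted transformers on disk

# Parameters of the steps are addressed as <step_name>__<param_name>
pipe.set_params(clf__C=10.0, reduce__n_components=3)
pipe.fit(X, y)
print("accuracy: %.3f" % pipe.score(X, y))

rmtree(cachedir)                          # clean up the cache directory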
thientu/scikit-learn
examples/preprocessing/plot_robust_scaling.py
220
2702
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Robust Scaling on Toy Data
=========================================================

Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.

Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)


# Code source: Thomas Unterthiner
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler

# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])

X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])

X_train[0, 0] = -1000  # a fairly large outlier


# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)

robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)


# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
              color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
    a.set_xlim(-3, 3)
    a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()


# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
bsd-3-clause
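The example above shows the effect visually; the difference comes down to the statistics each scaler estimates. StandardScaler centers with the mean and scales by the standard deviation, both of which a single outlier can drag arbitrarily far, while RobustScaler uses the median and the interquartile range. A small numeric sketch follows, with values chosen only for illustration.

# Sketch: how a single outlier distorts mean/std but barely moves median/IQR.
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler

x = np.array([[1.0], [2.0], [3.0], [4.0], [1000.0]])  # last point is an outlier

ss = StandardScaler().fit(x)
rs = RobustScaler().fit(x)

print("StandardScaler: center=%.1f scale=%.1f" % (ss.mean_[0], ss.scale_[0]))
print("RobustScaler:   center=%.1f scale=%.1f" % (rs.center_[0], rs.scale_[0]))
# The mean (202.0) and std (~399) are dominated by the outlier, so the four
# "normal" points end up squashed together after standard scaling, whereas the
# median (3.0) and IQR (2.0) leave them nicely spread out.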
ningchi/scikit-learn
examples/decomposition/plot_incremental_pca.py
243
1878
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ print(__doc__) # Authors: Kyle Kastner # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names): plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1], c=c, label=target_name) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error " "%.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best") plt.axis([-4, 4, -1.5, 1.5]) plt.show()
bsd-3-clause
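The example above passes the whole iris array to `fit_transform` and lets `batch_size` drive the chunking; for data that genuinely does not fit in memory, the same estimator can instead be fed chunk by chunk through `partial_fit`. A minimal sketch follows; the chunking of `X` here is only illustrative, real out-of-core use would stream chunks from disk.

# Sketch: out-of-core style use of IncrementalPCA via partial_fit.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import IncrementalPCA

X = load_iris().data

ipca = IncrementalPCA(n_components=2)
for chunk in np.array_split(X, 10):   # pretend each chunk arrives separately
    ipca.partial_fit(chunk)           # update the components incrementally

X_ipca = ipca.transform(X)            # transform can likewise be applied chunkwise
print(X_ipca.shape)                   # (150, 2)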
Fireblend/scikit-learn
examples/preprocessing/plot_robust_scaling.py
220
2702
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Robust Scaling on Toy Data
=========================================================

Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.

Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)


# Code source: Thomas Unterthiner
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np

from sklearn.preprocessing import StandardScaler, RobustScaler

# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])

X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])

X_train[0, 0] = -1000  # a fairly large outlier


# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)

robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)

# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
              color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
    a.set_xlim(-3, 3)
    a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()


# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
bsd-3-clause
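Beyond the default interquartile range, RobustScaler can also be told which quantiles to use via `quantile_range`. The short sketch below assumes scikit-learn >= 0.18, where that parameter was added, and uses made-up data.

# Sketch: widening the quantile range used by RobustScaler (assumes sklearn >= 0.18).
import numpy as np
from sklearn.preprocessing import RobustScaler

rng = np.random.RandomState(0)
x = rng.normal(size=(100, 1))
x[0, 0] = -1000.0                                   # inject one large outlier

default_rs = RobustScaler()                         # 25th-75th percentile
wide_rs = RobustScaler(quantile_range=(5.0, 95.0))  # wider range, still outlier-insensitive

print(default_rs.fit(x).scale_[0])
print(wide_rs.fit(x).scale_[0])                     # larger scale, since the 5-95 range is wider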
abinit/abinit
scripts/post_processing/phondisp2abi.py
1
4163
#! /usr/bin/python
# Copyright (C) 2010-2021 ABINIT group
#
# Written by Matthieu Verstraete in python (compatible v1.9).
# This is free software, and you are welcome to redistribute it
# under certain conditions (GNU General Public License,
# see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt).
#
# ABINIT is a project of the Universite Catholique de Louvain,
# Corning Inc. and other collaborators, see ~abinit/doc/developers/contributors.txt.
# Please read ~abinit/doc/biblio/generated_files/bib_acknow.html for suggested
# acknowledgments of the ABINIT effort.
#
# For more information, see https://www.abinit.org .
#
# This script is to be used with the PHON code (or equivalent)
# to calculate frozen phonon frequencies, free energies, etc...
# It takes a DISP file of atomic displacements and an SPOSCAR file
# with a supercell structure (VASP/PHON formats), and creates
# the necessary lines for an abinit input file to calculate the
# forces in displaced configurations.
# See http://chianti.geol.ucl.ac.uk/~dario or
# D. Alfe, Computer Physics Communications 180, 2622-2633 (2009)
#
# NOTE: the symmetries in the present (1.28 8/2010) version of PHON
# are not functioning properly in some cases. It is your own
# responsibility to check it, and has nothing to do with ABINIT.
#
# How to use:
#
# 1) run abinit for the relaxed structure with prtposcar 1.
#    this creates a reference XXX_POSCAR file, which you should rename
#    POSCAR.
# 2) run phon with LSUPER=.true. or (e.g.) phonopy -d --dim="2 2 3"
#    to create the SPOSCAR and DISP files
# 3) run this script (phondisp2abi.py).
# 4) copy script output to the abinit input file (removing duplicate
#    input variables etc...)
# 5) run abinit for each of the given datasets (and prtposcar 1 still)
# 6) concatenate the resulting XXX_FORCES files into one FORCES file
#    You also need to include the header lines for each displacement,
#    which are given by phondisp2abi.py in comments for each dataset
# 7) run phon again to get the desired phonons and properties.
#
#
import re
import string
import numpy
import numpy.linalg

#
# convert PHON DISP and SPOSCAR files into ABINIT datasets with appropriately displaced atoms
#
fp_disp = open('DISP')
lines_disp = fp_disp.readlines()

fp_sposcar = open('SPOSCAR')
lines_sposcar = fp_sposcar.readlines()

# make unit cell input line
rprimd = numpy.zeros((3,3))
for idir in range(3):
    line = lines_sposcar[2+idir]
    tokens = map(float,string.split(line))
    rprimd[0][idir] = tokens[0]
    rprimd[1][idir] = tokens[1]
    rprimd[2][idir] = tokens[2]

# get equilibrium positions
equilxred=[]
for line in lines_sposcar[7:]:
    equilxred.append(numpy.array(map(float,string.split(line))))

# output unit cell input line
print "# Add this to the abinit input file to do the PHON displacements"
print "# given in the DISP file, with respect to the supercell in SPOSCAR"
print "#"
print "# Remember the POSCAR files have sorted the atomic types so the positions"
print "# and displacements are now type ordered (fix typat, spinat, etc!)"
print "#"
print "ndtset ", len(lines_disp)

print "# supercell lattice vectors "
print "acell 1 1 1 Angstr"
print "rprim"
print " %24.14f %24.14f %24.14f" % (rprimd[0][0], rprimd[1][0], rprimd[2][0])
print " %24.14f %24.14f %24.14f" % (rprimd[0][1], rprimd[1][1], rprimd[2][1])
print " %24.14f %24.14f %24.14f" % (rprimd[0][2], rprimd[1][2], rprimd[2][2])

idtset=1
# for each displacement,
for line in lines_disp:
    tokens = string.split(line)
    # get displacement in reduced coordinates
    iatom = int(tokens[1])
    dispred = numpy.array(map(float,tokens[2:5]))

    # add displacement to correct atom
    xred = list(equilxred)
    xred[iatom-1] = xred[iatom-1] + dispred

    # output xred for this dataset
    print "# add the following line, without the #, to the FORCES file for this dtset, when concatenating"
    print "# %d %24.14f %24.14f %24.14f" % (iatom, dispred[0], dispred[1], dispred[2])
    print "xred%d" % (idtset,)
    for xred_1at in xred:
        print " %24.14f %24.14f %24.14f" % (xred_1at[0], xred_1at[1], xred_1at[2])

    # increment dataset counter
    idtset=idtset+1
gpl-3.0
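The core of the script above is its final loop: each DISP line names an atom index and a displacement in reduced (fractional) coordinates, and the script adds that displacement to the atom's equilibrium position before printing the per-dataset xred block. A small Python 3 sketch of that single step follows; the sample line and coordinates are made up, and the token layout (index at position 1, displacement at positions 2-4) is simply taken from the parsing in the script.

# Sketch (Python 3): applying one DISP-style displacement to equilibrium
# reduced coordinates. The input line below is illustrative only.
import numpy as np

equilxred = np.array([[0.00, 0.00, 0.00],   # atom 1, fractional coordinates
                      [0.25, 0.25, 0.25]])  # atom 2

disp_line = "d 2 0.01 0.00 0.00"            # token 1 = atom index, tokens 2:5 = displacement
tokens = disp_line.split()
iatom = int(tokens[1])
dispred = np.array([float(t) for t in tokens[2:5]])

xred = equilxred.copy()
xred[iatom - 1] += dispred                  # displace atom 2 along the first lattice vector

for row in xred:
    print(" %24.14f %24.14f %24.14f" % tuple(row))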
thientu/scikit-learn
sklearn/linear_model/ridge.py
60
44642
""" Ridge regression """ # Author: Mathieu Blondel <mathieu@mblondel.org> # Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com> # Fabian Pedregosa <fabian@fseoane.net> # Michael Eickenberg <michael.eickenberg@nsup.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy import linalg from scipy import sparse from scipy.sparse import linalg as sp_linalg from .base import LinearClassifierMixin, LinearModel, _rescale_data from .sag import sag_solver from .sag_fast import get_max_squared_sum from ..base import RegressorMixin from ..utils.extmath import safe_sparse_dot from ..utils import check_X_y from ..utils import check_array from ..utils import check_consistent_length from ..utils import compute_sample_weight from ..utils import column_or_1d from ..preprocessing import LabelBinarizer from ..grid_search import GridSearchCV from ..externals import six from ..metrics.scorer import check_scoring def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0): n_samples, n_features = X.shape X1 = sp_linalg.aslinearoperator(X) coefs = np.empty((y.shape[1], n_features)) if n_features > n_samples: def create_mv(curr_alpha): def _mv(x): return X1.matvec(X1.rmatvec(x)) + curr_alpha * x return _mv else: def create_mv(curr_alpha): def _mv(x): return X1.rmatvec(X1.matvec(x)) + curr_alpha * x return _mv for i in range(y.shape[1]): y_column = y[:, i] mv = create_mv(alpha[i]) if n_features > n_samples: # kernel ridge # w = X.T * inv(X X^t + alpha*Id) y C = sp_linalg.LinearOperator( (n_samples, n_samples), matvec=mv, dtype=X.dtype) coef, info = sp_linalg.cg(C, y_column, tol=tol) coefs[i] = X1.rmatvec(coef) else: # linear ridge # w = inv(X^t X + alpha*Id) * X.T y y_column = X1.rmatvec(y_column) C = sp_linalg.LinearOperator( (n_features, n_features), matvec=mv, dtype=X.dtype) coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol) if info < 0: raise ValueError("Failed with error code %d" % info) if max_iter is None and info > 0 and verbose: warnings.warn("sparse_cg did not converge after %d iterations." % info) return coefs def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3): n_samples, n_features = X.shape coefs = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) # According to the lsqr documentation, alpha = damp^2. 
sqrt_alpha = np.sqrt(alpha) for i in range(y.shape[1]): y_column = y[:, i] info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter) coefs[i] = info[0] n_iter[i] = info[2] return coefs, n_iter def _solve_cholesky(X, y, alpha): # w = inv(X^t X + alpha*Id) * X.T y n_samples, n_features = X.shape n_targets = y.shape[1] A = safe_sparse_dot(X.T, X, dense_output=True) Xy = safe_sparse_dot(X.T, y, dense_output=True) one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) if one_alpha: A.flat[::n_features + 1] += alpha[0] return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T else: coefs = np.empty([n_targets, n_features]) for coef, target, current_alpha in zip(coefs, Xy.T, alpha): A.flat[::n_features + 1] += current_alpha coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel() A.flat[::n_features + 1] -= current_alpha return coefs def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): # dual_coef = inv(X X^t + alpha*Id) y n_samples = K.shape[0] n_targets = y.shape[1] if copy: K = K.copy() alpha = np.atleast_1d(alpha) one_alpha = (alpha == alpha[0]).all() has_sw = isinstance(sample_weight, np.ndarray) \ or sample_weight not in [1.0, None] if has_sw: # Unlike other solvers, we need to support sample_weight directly # because K might be a pre-computed kernel. sw = np.sqrt(np.atleast_1d(sample_weight)) y = y * sw[:, np.newaxis] K *= np.outer(sw, sw) if one_alpha: # Only one penalty, we can solve multi-target problems in one time. K.flat[::n_samples + 1] += alpha[0] try: # Note: we must use overwrite_a=False in order to be able to # use the fall-back solution below in case a LinAlgError # is raised dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False) except np.linalg.LinAlgError: warnings.warn("Singular matrix in solving dual problem. Using " "least-squares solution instead.") dual_coef = linalg.lstsq(K, y)[0] # K is expensive to compute and store in memory so change it back in # case it was user-given. K.flat[::n_samples + 1] -= alpha[0] if has_sw: dual_coef *= sw[:, np.newaxis] return dual_coef else: # One penalty per target. We need to solve each target separately. dual_coefs = np.empty([n_targets, n_samples]) for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): K.flat[::n_samples + 1] += current_alpha dual_coef[:] = linalg.solve(K, target, sym_pos=True, overwrite_a=False).ravel() K.flat[::n_samples + 1] -= current_alpha if has_sw: dual_coefs *= sw[np.newaxis, :] return dual_coefs.T def _solve_svd(X, y, alpha): U, s, Vt = linalg.svd(X, full_matrices=False) idx = s > 1e-15 # same default value as scipy.linalg.pinv s_nnz = s[idx][:, np.newaxis] UTy = np.dot(U.T, y) d = np.zeros((s.size, alpha.size)) d[idx] = s_nnz / (s_nnz ** 2 + alpha) d_UT_y = d * UTy return np.dot(Vt.T, d_UT_y).T def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=1e-3, verbose=0, random_state=None, return_n_iter=False): """Solve the ridge equation by the method of normal equations. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- X : {array-like, sparse matrix, LinearOperator}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values alpha : {float, array-like}, shape = [n_targets] if array-like The l_2 penalty to be used. If an array is passed, penalties are assumed to be specific to targets max_iter : int, optional Maximum number of iterations for conjugate gradient solver. 
For 'sparse_cg' and 'lsqr' solvers, the default value is determined by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample. If sample_weight is not None and solver='auto', the solver will be set to 'cholesky'. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution via a Cholesky decomposition of dot(X.T, X) - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. It also uses an iterative procedure, and is often faster than other solvers when both n_samples and n_features are large. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. All last four solvers support both dense and sparse data. tol : float Precision of the solution. verbose : int Verbosity level. Setting verbose > 0 will display additional information depending on the solver used. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. return_n_iter : boolean, default False If True, the method also returns `n_iter`, the actual number of iteration performed by the solver. Returns ------- coef : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). n_iter : int, optional The actual number of iteration performed by the solver. Only returned if `return_n_iter` is True. Notes ----- This function won't compute the intercept. 
""" # SAG needs X and y columns to be C-contiguous and np.float64 if solver == 'sag': X = check_array(X, accept_sparse=['csr'], dtype=np.float64, order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='F') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) y = check_array(y, dtype='numeric', ensure_2d=False) check_consistent_length(X, y) n_samples, n_features = X.shape if y.ndim > 2: raise ValueError("Target y has the wrong shape %s" % str(y.shape)) ravel = False if y.ndim == 1: y = y.reshape(-1, 1) ravel = True n_samples_, n_targets = y.shape if n_samples != n_samples_: raise ValueError("Number of samples in X and y does not correspond:" " %d != %d" % (n_samples, n_samples_)) has_sw = sample_weight is not None if solver == 'auto': # cholesky if it's a dense array and cg in any other case if not sparse.issparse(X) or has_sw: solver = 'cholesky' else: solver = 'sparse_cg' elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'): warnings.warn("""lsqr not available on this machine, falling back to sparse_cg.""") solver = 'sparse_cg' if has_sw: if np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") if solver != 'sag': # SAG supports sample_weight directly. For other solvers, # we implement sample_weight via a simple rescaling. X, y = _rescale_data(X, y, sample_weight) # There should be either 1 or n_targets penalties alpha = np.asarray(alpha).ravel() if alpha.size not in [1, n_targets]: raise ValueError("Number of targets and number of penalties " "do not correspond: %d != %d" % (alpha.size, n_targets)) if alpha.size == 1 and n_targets > 1: alpha = np.repeat(alpha, n_targets) if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'): raise ValueError('Solver %s not understood' % solver) n_iter = None if solver == 'sparse_cg': coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose) elif solver == 'lsqr': coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol) elif solver == 'cholesky': if n_features > n_samples: K = safe_sparse_dot(X, X.T, dense_output=True) try: dual_coef = _solve_cholesky_kernel(K, y, alpha) coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' else: try: coef = _solve_cholesky(X, y, alpha) except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' elif solver == 'sag': # precompute max_squared_sum for all targets max_squared_sum = get_max_squared_sum(X) coef = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): coef_, n_iter_, _ = sag_solver( X, target.ravel(), sample_weight, 'squared', alpha_i, max_iter, tol, verbose, random_state, False, max_squared_sum, dict()) coef[i] = coef_ n_iter[i] = n_iter_ coef = np.asarray(coef) if solver == 'svd': if sparse.issparse(X): raise TypeError('SVD solver does not support sparse' ' inputs currently') coef = _solve_svd(X, y, alpha) if ravel: # When y was passed as a 1d-array, we flatten the coefficients. 
coef = coef.ravel() if return_n_iter: return coef, n_iter else: return coef class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)): @abstractmethod def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto", random_state=None): self.alpha = alpha self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.max_iter = max_iter self.tol = tol self.solver = solver self.random_state = random_state def fit(self, X, y, sample_weight=None): X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64, multi_output=True, y_numeric=True) if ((sample_weight is not None) and np.atleast_1d(sample_weight).ndim > 1): raise ValueError("Sample weights must be 1D array or scalar") X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) self.coef_, self.n_iter_ = ridge_regression( X, y, alpha=self.alpha, sample_weight=sample_weight, max_iter=self.max_iter, tol=self.tol, solver=self.solver, random_state=self.random_state, return_n_iter=True) self._set_intercept(X_mean, y_mean, X_std) return self class Ridge(_BaseRidge, RegressorMixin): """Linear least squares with l2 regularization. This model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : {float, array-like}, shape (n_targets) Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. For 'sparse_cg' and 'lsqr' solvers, the default value is determined by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution. - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. 
It also uses an iterative procedure, and is often faster than other solvers when both n_samples and n_features are large. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. All last four solvers support both dense and sparse data. tol : float Precision of the solution. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. Attributes ---------- coef_ : array, shape (n_features,) or (n_targets, n_features) Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. n_iter_ : array or None, shape (n_targets,) Actual number of iterations for each target. Available only for sag and lsqr solvers. Other solvers will return None. See also -------- RidgeClassifier, RidgeCV, KernelRidge Examples -------- >>> from sklearn.linear_model import Ridge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = Ridge(alpha=1.0) >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='auto', tol=0.001) """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto", random_state=None): super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver, random_state=random_state) def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample Returns ------- self : returns an instance of self. """ return super(Ridge, self).fit(X, y, sample_weight=sample_weight) class RidgeClassifier(LinearClassifierMixin, _BaseRidge): """Classifier using Ridge regression. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : float Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. The default value is determined by scipy.sparse.linalg. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. 
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution. - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. It also uses an iterative procedure, and is faster than other solvers when both n_samples and n_features are large. tol : float Precision of the solution. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. Attributes ---------- coef_ : array, shape (n_features,) or (n_classes, n_features) Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. n_iter_ : array or None, shape (n_targets,) Actual number of iterations for each target. Available only for sag and lsqr solvers. Other solvers will return None. See also -------- Ridge, RidgeClassifierCV Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, class_weight=None, solver="auto", random_state=None): super(RidgeClassifier, self).__init__( alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver, random_state=random_state) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit Ridge regression model. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples,n_features] Training data y : array-like, shape = [n_samples] Target values sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : returns an instance of self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_ class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation It allows efficient Leave-One-Out cross-validation. This class is not intended to be used directly. Use RidgeCV instead. Notes ----- We want to solve (K + alpha*Id)c = y, where K = X X^T is the kernel matrix. Let G = (K + alpha*Id)^-1. Dual solution: c = Gy Primal solution: w = X^T c Compute eigendecomposition K = Q V Q^T. 
Then G = Q (V + alpha*Id)^-1 Q^T, where (V + alpha*Id) is diagonal. It is thus inexpensive to inverse for many alphas. Let loov be the vector of prediction values for each example when the model was fitted with all examples but this example. loov = (KGY - diag(KG)Y) / diag(I-KG) Let looe be the vector of prediction errors for each example when the model was fitted with all examples but this example. looe = y - loov = c / diag(G) References ---------- http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf """ def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, copy_X=True, gcv_mode=None, store_cv_values=False): self.alphas = np.asarray(alphas) self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.copy_X = copy_X self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def _pre_compute(self, X, y): # even if X is very sparse, K is usually very dense K = safe_sparse_dot(X, X.T, dense_output=True) v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) return v, Q, QT_y def _decomp_diag(self, v_prime, Q): # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) return (v_prime * Q ** 2).sum(axis=-1) def _diag_dot(self, D, B): # compute dot(diag(D), B) if len(B.shape) > 1: # handle case where B is > 1-d D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B def _errors(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return y - (c / G_diag), c def _pre_compute_svd(self, X, y): if sparse.issparse(X): raise TypeError("SVD not supported for sparse matrices") U, s, _ = linalg.svd(X, full_matrices=0) v = s ** 2 UT_y = np.dot(U.T, y) return v, U, UT_y def _errors_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case where y is 2-d G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case when y is 2-d G_diag = G_diag[:, np.newaxis] return y - (c / G_diag), c def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True, y_numeric=True) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) gcv_mode = self.gcv_mode with_sw = len(np.shape(sample_weight)) if gcv_mode is None or gcv_mode == 'auto': if sparse.issparse(X) or n_features > n_samples or with_sw: gcv_mode = 'eigen' else: gcv_mode = 'svd' elif gcv_mode == "svd" and with_sw: # FIXME non-uniform sample weights not yet supported warnings.warn("non-uniform sample weights unsupported for svd, " "forcing usage of eigen") gcv_mode = 'eigen' if gcv_mode == 'eigen': _pre_compute = self._pre_compute _errors = self._errors _values = self._values elif gcv_mode == 'svd': # assert n_samples >= n_features _pre_compute = self._pre_compute_svd _errors = self._errors_svd _values = self._values_svd else: raise ValueError('bad gcv_mode "%s"' % gcv_mode) v, Q, QT_y = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None for i, alpha in enumerate(self.alphas): weighted_alpha = (sample_weight * alpha if sample_weight is not None else alpha) if error: out, c = _errors(weighted_alpha, y, v, Q, QT_y) else: out, c = _values(weighted_alpha, y, v, Q, QT_y) cv_values[:, i] = out.ravel() C.append(c) if error: best = cv_values.mean(axis=0).argmin() else: # The scorer want an object that will make the predictions but # they are already computed efficiently by _RidgeGCV. This # identity_estimator will just return them def identity_estimator(): pass identity_estimator.decision_function = lambda y_predict: y_predict identity_estimator.predict = lambda y_predict: y_predict out = [scorer(identity_estimator, y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))] best = np.argmax(out) self.alpha_ = self.alphas[best] self.dual_coef_ = C[best] self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) self._set_intercept(X_mean, y_mean, X_std) if self.store_cv_values: if len(y.shape) == 1: cv_values_shape = n_samples, len(self.alphas) else: cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) return self class _BaseRidgeCV(LinearModel): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False): self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.cv = cv self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" if self.cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values) estimator.fit(X, y, sample_weight=sample_weight) self.alpha_ = estimator.alpha_ if self.store_cv_values: self.cv_values_ = estimator.cv_values_ else: if self.store_cv_values: raise ValueError("cv!=None and store_cv_values=True " " are incompatible") parameters = {'alpha': self.alphas} fit_params = {'sample_weight': sample_weight} gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept), parameters, fit_params=fit_params, cv=self.cv) gs.fit(X, y) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha self.coef_ = estimator.coef_ self.intercept_ = estimator.intercept_ return self class RidgeCV(_BaseRidgeCV, RegressorMixin): """Ridge regression with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used, else, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. gcv_mode : {None, 'auto', 'svd', eigen'}, optional Flag indicating which strategy to use when performing Generalized Cross-Validation. Options are:: 'auto' : use svd if n_samples > n_features or when X is a sparse matrix, otherwise use eigen 'svd' : force computation via singular value decomposition of X (does not work for sparse matrices) 'eigen' : force computation via eigendecomposition of X^T X The 'auto' mode is the default and is intended to pick the cheaper option of the two depending upon the shape and format of the training data. store_cv_values : boolean, default=False Flag indicating if the cross-validation values corresponding to each alpha should be stored in the `cv_values_` attribute (see below). This flag is only compatible with `cv=None` (i.e. using Generalized Cross-Validation). Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_targets, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and \ `cv=None`). 
After `fit()` has been called, this attribute will \ contain the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter. See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeClassifierCV: Ridge classifier with built-in cross validation """ pass class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV): """Ridge classifier with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Currently, only the n_features > n_samples case is handled efficiently. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the efficient Leave-One-Out cross-validation - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_responses, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and `cv=None`). After `fit()` has been called, this attribute will contain \ the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeCV: Ridge regression with built-in cross validation Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. 
""" def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, class_weight=None): super(RidgeClassifierCV, self).__init__( alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, scoring=scoring, cv=cv) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit the ridge classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : object Returns self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_
bsd-3-clause
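The _RidgeGCV notes above compress a useful fact: with G = (K + alpha*Id)^-1 and dual coefficients c = G y, the vector of leave-one-out errors is simply c / diag(G), so all n leave-one-out fits come at the price of a single factorization. The sketch below checks that identity numerically against brute-force leave-one-out refits; the data are random and purely illustrative, and the intercept is left out so the identity holds exactly.

# Sketch: verifying the leave-one-out identity used by _RidgeGCV,
#   looe = c / diag(G)   with   G = (K + alpha*Id)^-1,  c = G y.
import numpy as np
from numpy.linalg import inv

rng = np.random.RandomState(0)
n_samples, n_features, alpha = 30, 5, 1.0
X = rng.randn(n_samples, n_features)
y = X @ rng.randn(n_features) + 0.1 * rng.randn(n_samples)

K = X @ X.T                                  # linear kernel
G = inv(K + alpha * np.eye(n_samples))
c = G @ y
looe_closed_form = c / np.diag(G)            # leave-one-out errors, all at once

# Brute force: refit ridge on all-but-one sample, predict the held-out one.
looe_brute = np.empty(n_samples)
for i in range(n_samples):
    mask = np.arange(n_samples) != i
    Xi, yi = X[mask], y[mask]
    w = inv(Xi.T @ Xi + alpha * np.eye(n_features)) @ Xi.T @ yi   # primal ridge solution
    looe_brute[i] = y[i] - X[i] @ w

print(np.allclose(looe_closed_form, looe_brute))   # True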
Akshay0724/scikit-learn
sklearn/grid_search.py
16
40213
""" The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from .base import BaseEstimator, is_classifier, clone from .base import MetaEstimatorMixin from .cross_validation import check_cv from .cross_validation import _fit_and_score from .externals.joblib import Parallel, delayed from .externals import six from .utils import check_random_state from .utils.random import sample_without_replacement from .utils.validation import _num_samples, indexable from .utils.metaestimators import if_delegate_has_method from .metrics.scorer import check_scoring from .exceptions import ChangedBehaviorWarning __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] warnings.warn("This module was deprecated in version 0.18 in favor of the " "model_selection module into which all the refactored classes " "and functions are moved. This module will be removed in 0.20.", DeprecationWarning) class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.ParameterGrid` instead. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.grid_search import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: uses ``ParameterGrid`` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. 
""" for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.ParameterSampler` instead. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.grid_search import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d." % (grid_size, self.n_iter) + " For exhaustive searches, use GridSearchCV.") for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): params[k] = v.rvs() else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. .. deprecated:: 0.18 This module will be removed in 0.20. Use :func:`sklearn.model_selection.fit_grid_point` instead. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for name, v in p.items(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values for parameter ({0}) need " "to be a sequence.".format(name)) if len(v) == 0: raise ValueError("Parameter values for parameter ({0}) need " "to be a non-empty sequence.".format(name)) class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type @property def classes_(self): return self.best_estimator_.classes_ def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float Notes ----- * The long-standing behavior of this method changed in version 0.16. * It no longer uses the metric provided by ``estimator.score`` if the ``scoring`` parameter was set when fitting. """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) if self.scoring is not None and hasattr(self.best_estimator_, 'score'): warnings.warn("The long-standing behavior to use the estimator's " "score function in {0}.score has changed. The " "scoring parameter is now used." 
"".format(self.__class__.__name__), ChangedBehaviorWarning) return self.scorer_(self.best_estimator_, X, y) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict(self, X): """Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ return self.best_estimator_.transform(X) @if_delegate_has_method(delegate=('best_estimator_', 'estimator')) def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" return self.best_estimator_.transform(Xt) def _fit(self, X, y, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = self.cv self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) n_samples = _num_samples(X) X, y = indexable(X, y) if y is not None: if len(y) != n_samples: raise ValueError('Target variable (y) has a different number ' 'of samples (%i) than data (X: %i samples)' % (len(y), n_samples)) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) if self.verbose > 0: if isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(len(cv), n_candidates, n_candidates * len(cv))) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )( delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv) # Out is a list of triplet: score, estimator, n_test_samples n_fits = len(out) n_folds = len(cv) scores = list() grid_scores = list() for grid_start in range(0, n_fits, n_folds): n_test_samples = 0 score = 0 all_scores = [] for this_score, this_n_test_samples, _, parameters in \ out[grid_start:grid_start + n_folds]: all_scores.append(this_score) if self.iid: this_score *= this_n_test_samples n_test_samples += this_n_test_samples score += this_score if self.iid: score /= float(n_test_samples) else: score /= float(n_folds) scores.append((score, parameters)) # TODO: shall we also store the test_fold_sizes? grid_scores.append(_CVScoreTuple( parameters, score, np.array(all_scores))) # Store the computed scores self.grid_scores_ = grid_scores # Find the best parameters by comparing on the mean validation score: # note that `sorted` is deterministic in the way it breaks ties best = sorted(grid_scores, key=lambda x: x.mean_validation_score, reverse=True)[0] self.best_params_ = best.parameters self.best_score_ = best.mean_validation_score if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best.parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.GridSearchCV` instead. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. 
param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs: int, default: 1 : The maximum number of estimators fit in parallel. - If -1 all CPUs are used. - If 1 is given, no parallel computing code is used at all, which is useful for debugging. - For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used. For example, with ``n_jobs = -2`` all CPUs but one are used. .. versionchanged:: 0.17 Upgraded to joblib 0.9.3. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, grid_search, datasets >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = grid_search.GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... 
# doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape='ovr', degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a hyperparameter grid. :func:`sklearn.cross_validation.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator, scoring, fit_params, n_jobs, iid, refit, cv, verbose, pre_dispatch, error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ return self._fit(X, y, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. .. deprecated:: 0.18 This module will be removed in 0.20. Use :class:`sklearn.model_selection.RandomizedSearchCV` instead. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. 
The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs: int, default: 1 : The maximum number of estimators fit in parallel. - If -1 all CPUs are used. - If 1 is given, no parallel computing code is used at all, which is useful for debugging. - For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used. For example, with ``n_jobs = -2`` all CPUs but one are used. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold` is used. In all other cases, :class:`sklearn.model_selection.KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. 
If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- grid_scores_ : list of named tuples Contains scores for all parameter combinations in param_grid. Each entry corresponds to one parameter setting. Each named tuple has the attributes: * ``parameters``, a dict of parameter settings * ``mean_validation_score``, the mean score over the cross-validation folds * ``cv_validation_scores``, the list of scores for each fold best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settings, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, sampled_params)
bsd-3-clause
Akshay0724/scikit-learn
examples/ensemble/plot_gradient_boosting_regularization.py
352
2843
""" ================================ Gradient Boosting regularization ================================ Illustration of the effect of different regularization strategies for Gradient Boosting. The example is taken from Hastie et al 2009. The loss function used is binomial deviance. Regularization via shrinkage (``learning_rate < 1.0``) improves performance considerably. In combination with shrinkage, stochastic gradient boosting (``subsample < 1.0``) can produce more accurate models by reducing the variance via bagging. Subsampling without shrinkage usually does poorly. Another strategy to reduce the variance is by subsampling the features analogous to the random splits in Random Forests (via the ``max_features`` parameter). .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn import datasets X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X = X.astype(np.float32) # map labels from {-1, 1} to {0, 1} labels, y = np.unique(y, return_inverse=True) X_train, X_test = X[:2000], X[2000:] y_train, y_test = y[:2000], y[2000:] original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2, 'min_samples_split': 5} plt.figure() for label, color, setting in [('No shrinkage', 'orange', {'learning_rate': 1.0, 'subsample': 1.0}), ('learning_rate=0.1', 'turquoise', {'learning_rate': 0.1, 'subsample': 1.0}), ('subsample=0.5', 'blue', {'learning_rate': 1.0, 'subsample': 0.5}), ('learning_rate=0.1, subsample=0.5', 'gray', {'learning_rate': 0.1, 'subsample': 0.5}), ('learning_rate=0.1, max_features=2', 'magenta', {'learning_rate': 0.1, 'max_features': 2})]: params = dict(original_params) params.update(setting) clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) # compute test set deviance test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): # clf.loss_ assumes that y_test[i] in {0, 1} test_deviance[i] = clf.loss_(y_test, y_pred) plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5], '-', color=color, label=label) plt.legend(loc='upper left') plt.xlabel('Boosting Iterations') plt.ylabel('Test Set Deviance') plt.show()
bsd-3-clause
thientu/scikit-learn
examples/ensemble/plot_gradient_boosting_regularization.py
352
2843
""" ================================ Gradient Boosting regularization ================================ Illustration of the effect of different regularization strategies for Gradient Boosting. The example is taken from Hastie et al 2009. The loss function used is binomial deviance. Regularization via shrinkage (``learning_rate < 1.0``) improves performance considerably. In combination with shrinkage, stochastic gradient boosting (``subsample < 1.0``) can produce more accurate models by reducing the variance via bagging. Subsampling without shrinkage usually does poorly. Another strategy to reduce the variance is by subsampling the features analogous to the random splits in Random Forests (via the ``max_features`` parameter). .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn import datasets X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X = X.astype(np.float32) # map labels from {-1, 1} to {0, 1} labels, y = np.unique(y, return_inverse=True) X_train, X_test = X[:2000], X[2000:] y_train, y_test = y[:2000], y[2000:] original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2, 'min_samples_split': 5} plt.figure() for label, color, setting in [('No shrinkage', 'orange', {'learning_rate': 1.0, 'subsample': 1.0}), ('learning_rate=0.1', 'turquoise', {'learning_rate': 0.1, 'subsample': 1.0}), ('subsample=0.5', 'blue', {'learning_rate': 1.0, 'subsample': 0.5}), ('learning_rate=0.1, subsample=0.5', 'gray', {'learning_rate': 0.1, 'subsample': 0.5}), ('learning_rate=0.1, max_features=2', 'magenta', {'learning_rate': 0.1, 'max_features': 2})]: params = dict(original_params) params.update(setting) clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) # compute test set deviance test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): # clf.loss_ assumes that y_test[i] in {0, 1} test_deviance[i] = clf.loss_(y_test, y_pred) plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5], '-', color=color, label=label) plt.legend(loc='upper left') plt.xlabel('Boosting Iterations') plt.ylabel('Test Set Deviance') plt.show()
bsd-3-clause
thientu/scikit-learn
sklearn/tests/test_kernel_approximation.py
242
7588
import numpy as np from scipy.sparse import csr_matrix from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal, assert_raises from sklearn.utils.testing import assert_less_equal from sklearn.metrics.pairwise import kernel_metrics from sklearn.kernel_approximation import RBFSampler from sklearn.kernel_approximation import AdditiveChi2Sampler from sklearn.kernel_approximation import SkewedChi2Sampler from sklearn.kernel_approximation import Nystroem from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] def test_additive_chi2_sampler(): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # appreviations for easier formular X_ = X[:, np.newaxis, :] Y_ = Y[np.newaxis, :, :] large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = (large_kernel.sum(axis=2)) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_matrix(X)) Y_sp_trans = transform.transform(csr_matrix(Y)) assert_array_equal(X_trans, X_sp_trans.A) assert_array_equal(Y_trans, Y_sp_trans.A) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) # test error on invalid sample_steps transform = AdditiveChi2Sampler(sample_steps=4) assert_raises(ValueError, transform.fit, X) # test that the sample interval is set correctly sample_steps_available = [1, 2, 3] for sample_steps in sample_steps_available: # test that the sample_interval is initialized correctly transform = AdditiveChi2Sampler(sample_steps=sample_steps) assert_equal(transform.sample_interval, None) # test that the sample_interval is changed in the fit method transform.fit(X) assert_not_equal(transform.sample_interval_, None) # test that the sample_interval is set correctly sample_interval = 0.3 transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval) assert_equal(transform.sample_interval, sample_interval) transform.fit(X) assert_equal(transform.sample_interval_, sample_interval) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # appreviations for easier formular X_c = (X + c)[:, np.newaxis, :] Y_c = (Y + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) 
- np.log(X_c + Y_c)) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10. kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased np.abs(error, out=error) assert_less_equal(np.max(error), 0.1) # nothing too far off assert_less_equal(np.mean(error), 0.05) # mean is fairly close def test_input_validation(): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_matrix(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test callable kernel linear_kernel = lambda X, Y: np.dot(X, Y.T) trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert_true(np.all(np.isfinite(Y))) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters beside gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=.1) nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=.1) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable. 
rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem(kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={'log': kernel_log}).fit(X) assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
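The tests above verify that RBFSampler's random feature map reproduces the exact RBF kernel up to approximation error. A common way to exploit that property, sketched below under assumed hyperparameters (gamma, n_components and the synthetic labels are illustrative, not taken from the test module), is to feed the explicit feature map to a linear classifier.

# Hedged sketch: approximate a kernelized classifier by composing
# RBFSampler's explicit feature map with a linear SGD classifier.
# gamma / n_components / the synthetic target are illustrative assumptions.
import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X = rng.random_sample(size=(200, 10))
y = (X[:, 0] > 0.5).astype(int)            # simple synthetic target

feature_map = RBFSampler(gamma=1.0, n_components=300, random_state=0)
X_features = feature_map.fit_transform(X)  # shape (200, 300)

clf = SGDClassifier(random_state=0)
clf.fit(X_features, y)
print("training accuracy:", clf.score(X_features, y))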
bsd-3-clause
RecipeML/Recipe
recipe/preprocessors/selectFdr.py
1
1286
# -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá

This file is part of the RECIPE Algorithm.

The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.

RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. See http://www.gnu.org/licenses/.

"""

from sklearn.feature_selection import f_classif, chi2, SelectFdr


def selectFdr(args):
    """Uses scikit-learn's SelectFdr to select the features whose p-values are
    below a threshold for the estimated false discovery rate.

    Parameters
    ----------
    args : list of str
        Positional arguments: ``args[1]`` is the ``alpha`` threshold and
        ``args[2]`` names the score function ("chi2" or "f_classif").

    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.
    """
    # args[1]: alpha threshold; args[2]: name of the scoring function.
    if args[2] == "chi2":
        selector = SelectFdr(chi2, alpha=float(args[1]))
    elif args[2] == "f_classif":
        selector = SelectFdr(f_classif, alpha=float(args[1]))
    else:
        # Fail explicitly on an unknown score function name rather than
        # letting an unbound `selector` raise a NameError below.
        raise ValueError("Unknown score function: {}".format(args[2]))

    return selector
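A hedged illustration of how the selectFdr wrapper above might be called. The positional layout of args (alpha at index 1, score-function name at index 2) is read off the function body; the value at index 0 is assumed to be unused here, and the dataset is only for demonstration.

# Hedged usage sketch for the selectFdr wrapper defined above.
# args[0] is assumed unused; args[1] is alpha, args[2] names the score function.
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)

selector = selectFdr(["SelectFdr", "0.05", "f_classif"])
X_reduced = selector.fit_transform(X, y)
print(X_reduced.shape)   # features surviving the FDR-controlled selection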
gpl-3.0
DistrictDataLabs/yellowbrick
setup.py
1
5534
#!/usr/bin/env python # setup # Setup script for installing yellowbrick # # Author: Benjamin Bengfort # Created: Wed May 18 14:33:26 2016 -0400 # # Copyright (C) 2016 The scikit-yb developers # For license information, see LICENSE.txt and NOTICE.md # # ID: setup.py [c4f3ba7] benjamin@bengfort.com $ """ Setup script for installing yellowbrick. See http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html """ ########################################################################## ## Imports ########################################################################## import os import codecs from setuptools import setup from setuptools import find_packages ########################################################################## ## Package Information ########################################################################## ## Basic information ## Basic information NAME = "yellowbrick" DESCRIPTION = "A suite of visual analysis and diagnostic tools for machine learning." AUTHOR = "The scikit-yb developers" EMAIL = "yellowbrick@googlegroups.com" MAINTAINER = "The scikit-yb developers" LICENSE = "Apache 2" REPOSITORY = "https://github.com/DistrictDataLabs/yellowbrick" PACKAGE = "yellowbrick" URL = "http://scikit-yb.org/" ## Define the keywords KEYWORDS = ( "visualization", "machine learning", "scikit-learn", "matplotlib", "data science", ) ## Define the classifiers ## See https://pypi.python.org/pypi?%3Aaction=list_classifiers CLASSIFIERS = ( "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Software Development", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Scientific/Engineering :: Visualization", ) ## Important Paths PROJECT = os.path.abspath(os.path.dirname(__file__)) REQUIRE_PATH = "requirements.txt" VERSION_PATH = os.path.join(PACKAGE, "version.py") PKG_DESCRIBE = "DESCRIPTION.md" ## Directories to ignore in find_packages EXCLUDES = ( "tests", "tests.*", "bin", "docs", "docs.*", "fixtures", "register", "notebooks", "notebooks.*", "examples", "examples.*", "binder", "binder.*", "paper", ) ########################################################################## ## Helper Functions ########################################################################## def read(*parts): """ Assume UTF-8 encoding and return the contents of the file located at the absolute path from the REPOSITORY joined with *parts. """ with codecs.open(os.path.join(PROJECT, *parts), "rb", "utf-8") as f: return f.read() def get_version(path=VERSION_PATH): """ Reads the python file defined in the VERSION_PATH to find the get_version function, and executes it to ensure that it is loaded correctly. Separating the version in this way ensures no additional code is executed. """ namespace = {} exec(read(path), namespace) return namespace["get_version"](short=True) def get_requires(path=REQUIRE_PATH): """ Yields a generator of requirements as defined by the REQUIRE_PATH which should point to a requirements.txt output by `pip freeze`. 
""" for line in read(path).splitlines(): line = line.strip() if line and not line.startswith("#"): yield line def get_description_type(path=PKG_DESCRIBE): """ Returns the long_description_content_type based on the extension of the package describe path (e.g. .txt, .rst, or .md). """ _, ext = os.path.splitext(path) return {".rst": "text/x-rst", ".txt": "text/plain", ".md": "text/markdown"}[ext] ########################################################################## ## Define the configuration ########################################################################## config = { "name": NAME, "version": get_version(), "description": DESCRIPTION, "long_description": read(PKG_DESCRIBE), "long_description_content_type": get_description_type(PKG_DESCRIBE), "classifiers": CLASSIFIERS, "keywords": KEYWORDS, "license": LICENSE, "author": AUTHOR, "author_email": EMAIL, "url": URL, "maintainer": MAINTAINER, "maintainer_email": EMAIL, "project_urls": { "Documentation": URL, "Download": "{}/tarball/v{}".format(REPOSITORY, get_version()), "Source": REPOSITORY, "Tracker": "{}/issues".format(REPOSITORY), }, "download_url": "{}/tarball/v{}".format(REPOSITORY, get_version()), "packages": find_packages(where=PROJECT, exclude=EXCLUDES), "package_data": {"yellowbrick": ["datasets/manifest.json"]}, "zip_safe": False, "entry_points": {"console_scripts": []}, "install_requires": list(get_requires()), "python_requires": ">=3.4, <4" } ########################################################################## ## Run setup script ########################################################################## if __name__ == "__main__": setup(**config)
apache-2.0
gem-pasteur/macsyfinder
macsypy/scripts/macsyfinder.py
1
57734
######################################################################### # MacSyFinder - Detection of macromolecular systems in protein dataset # # using systems modelling and similarity search. # # Authors: Sophie Abby, Bertrand Neron # # Copyright (c) 2014-2022 Institut Pasteur (Paris) and CNRS. # # See the COPYRIGHT file for details # # # # This file is part of MacSyFinder package. # # # # MacSyFinder is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # MacSyFinder is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details . # # # # You should have received a copy of the GNU General Public License # # along with MacSyFinder (COPYING). # # If not, see <https://www.gnu.org/licenses/>. # ######################################################################### """ Main entrypoint to macsyfinder """ import sys import os import argparse import logging import itertools from operator import attrgetter # To be used with "sorted" from textwrap import dedent import colorlog _log = colorlog.getLogger('macsypy') import pandas as pd import macsypy from macsypy.config import MacsyDefaults, Config from macsypy.cluster import Cluster from macsypy.registries import ModelRegistry, scan_models_dir from macsypy.definition_parser import DefinitionParser from macsypy.search_genes import search_genes from macsypy.database import Indexes, RepliconDB from macsypy.error import OptionError from macsypy import cluster from macsypy.hit import get_best_hits, HitWeight, MultiSystem, LonerMultiSystem, \ sort_model_hits, compute_best_MSHit from macsypy.system import OrderedMatchMaker, UnorderedMatchMaker, System, LikelySystem, UnlikelySystem, HitSystemTracker from macsypy.utils import get_def_to_detect, get_replicon_names from macsypy.profile import ProfileFactory from macsypy.model import ModelBank from macsypy.gene import GeneBank from macsypy.solution import find_best_solutions, combine_clusters, combine_multisystems from macsypy.serialization import TxtSystemSerializer, TxtLikelySystemSerializer, TxtUnikelySystemSerializer, \ TsvSystemSerializer, TsvSolutionSerializer, TsvLikelySystemSerializer, TsvSpecialHitSerializer, TsvRejectedCandidatesSerializer def get_version_message(): """ :return: the long description of the macsyfinder version :rtype: str """ version = macsypy.__version__ py_vers = sys.version.replace('\n', ' ') vers_msg = f"""Macsyfinder {version} using: - Python {py_vers} - NetworkX {macsypy.solution.nx.__version__} - Pandas {pd.__version__} MacsyFinder is distributed under the terms of the GNU General Public License (GPLv3). See the COPYING file for details. If you use this software please cite: {macsypy.__citation__} and don't forget to cite models used: macsydata cite <model> """ return vers_msg def list_models(args): """ :param args: The command line argument once parsed :type args: :class:`argparse.Namespace` object :return: a string representation of all models and submodels installed. 
:rtype: str """ defaults = MacsyDefaults() config = Config(defaults, args) model_dirs = config.models_dir() registry = ModelRegistry() for model_dir in model_dirs: try: for model_loc in scan_models_dir(model_dir, profile_suffix=config.profile_suffix): registry.add(model_loc) except PermissionError as err: _log.warning(f"{model_dir} is not readable: {err} : skip it.") return str(registry) def parse_args(args): """ :param args: The arguments provided on the command line :type args: List of strings [without the program name] :return: The arguments parsed :rtype: :class:`argparse.Namespace` object. """ parser = argparse.ArgumentParser( epilog="For more details, visit the MacSyFinder website and see the MacSyFinder documentation.", # formatter_class=ArgumentDefaultsHelpRawTextFormatter, formatter_class=argparse.RawTextHelpFormatter, description=dedent(r''' * * * * * * * * * * ** * * ** * * * * * * * __ __ * ____ * * * * ** * || | \/ | __ _ ___ || / ___| _ _ || ___ _ _ * || | |\/| |/ _` |/ __| || \___ \| | | | || | __(_)_ _ __| |___ _ _ || | | | | (_| | (__ || ___) | |_| | || | _|| | ' \/ _` / -_) '_| || |_| |_|\__,_|\___| || |____/ \__, | || |_| |_|_||_\__,_\___|_| * * |___/ * * * * * * * ** * * * * * * * * * * * * * * * MacSyFinder (MSF) - Detection of macromolecular systems in protein datasets using systems modelling and similarity search. ''')) msf_def = MacsyDefaults() # , formatter_class=argparse.RawDescriptionHelpFormatter) # , formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("-m", "--models", nargs='*', default=None, help="""The models to search. The first element must be the name of family models, followed by the name of the models to search. If the name 'all' is in the list of models, all models from the family will be searched. '--models TXSS Flagellum T2SS' means MSF will search for the models TXSS/Flagellum and TXSS/T2SS '--models TXSS all' means MSF will search for all models found in the model package TXSS '--models CRISPRcas/subtyping all' means MSF will search for all models described in the CRISPRCas/subtyping subfamily. (required unless --previous-run is set) """) genome_options = parser.add_argument_group(title="Input dataset options") genome_options.add_argument("--sequence-db", action='store', default=None, help="""Path to the sequence dataset in fasta format. (required unless --previous-run is set) """) genome_options.add_argument("--db-type", choices=['ordered_replicon', 'gembase', 'unordered'], default=None, help='''The type of dataset to deal with. "unordered" corresponds to a non-assembled genome or set of unassembled genes, "ordered_replicon" to an assembled genome, "gembase" to a set of replicons where sequence identifiers follow this convention: ">RepliconName_SequenceID". (required unless --previous-run is set) ''') genome_options.add_argument("--replicon-topology", choices=['linear', 'circular'], default=None, help=f"""The topology of the replicons (this option is meaningful only if the db_type is 'ordered_replicon' or 'gembase'.) (default: {msf_def['replicon_topology']}) """) genome_options.add_argument("--topology-file", default=None, help="""Topology file path. The topology file allows to specify a topology (linear or circular) for each replicon (this option is meaningful only if the db_type is 'ordered_replicon' or 'gembase'. 
A topology file is a tabular file with two columns: the 1st is the replicon name, and the 2nd the corresponding topology: \"RepliconA\tlinear\" """) genome_options.add_argument("--idx", action='store_true', default=False, help=f"""Forces to build the indexes for the sequence dataset even if they were previously computed and present at the dataset location. (default: {msf_def['idx']})""" ) system_options = parser.add_argument_group(title="Systems detection options") system_options.add_argument("--inter-gene-max-space", action='append', nargs=2, default=None, help="""Co-localization criterion: maximum number of components non-matched by a profile allowed between two matched components for them to be considered contiguous. Option only meaningful for 'ordered' datasets. The first value must name a model, the second a number of components. This option can be repeated several times: "--inter-gene-max-space TXSS/T2SS 12 --inter-gene-max-space TXSS/Flagellum 20 """ ) system_options.add_argument("--min-mandatory-genes-required", action='append', nargs=2, default=None, help="""The minimal number of mandatory genes required for model assessment. The first value must correspond to a model fully qualified name, the second value to an integer. This option can be repeated several times: "--min-mandatory-genes-required TXSS/T2SS 15 --min-mandatory-genes-required TXSS/Flagellum 10" """ ) system_options.add_argument("--min-genes-required", action='append', nargs=2, default=None, help="""The minimal number of genes required for model assessment (includes both 'mandatory' and 'accessory' components). The first value must correspond to a model fully qualified name, the second value to an integer. This option can be repeated several times: "--min-genes-required TXSS/T2SS 15 --min-genes-required TXSS/Flagellum 10 """ ) system_options.add_argument("--max-nb-genes", action='append', nargs=2, default=None, help="""The maximal number of genes to consider a system as full. The first value must correspond to a model name, the second value to an integer. This option can be repeated several times: "--max-nb-genes TXSS/T2SS 5 --max-nb-genes TXSS/Flagellum 10" """ ) system_options.add_argument("--multi-loci", action='store', default=None, help="""Specifies if the system can be detected as a 'scattered' (or multiple-loci-encoded) system. The models are specified as a comma separated list of fully qualified name(s) "--multi-loci model_familyA/model_1,model_familyB/model_2" """) hmmer_options = parser.add_argument_group(title="Options for Hmmer execution and hits filtering") hmmer_options.add_argument('--hmmer', action='store', default=None, help=f"""Path to the hmmsearch program. If not specified, rely on the environment variable PATH (default: {msf_def['hmmer']})""") hmmer_options.add_argument('--e-value-search', action='store', type=float, default=None, help=f"""Maximal e-value for hits to be reported during hmmsearch search. By default MSF set per profile threshold for hmmsearch run (hmmsearch --cut_ga option) for profiles containing the GA bit score threshold. If a profile does not contains the GA bit score the --e-value-search (-E in hmmsearch) is applied to this profile. To applied the --e-value-search to all profiles use the --no-cut-ga option. 
(default: {msf_def['e_value_search']}) """) cut_ga_group = hmmer_options.add_mutually_exclusive_group() cut_ga_group.add_argument('--no-cut-ga', action='store_true', default=None, help=f"""By default the MSF try to applied a threshold per profile by using the hmmer -cut-ga option. This is possible only if the GA bit score is present in the profile otherwise MF switch to use the --e-value-search (-E in hmmsearch). If this option is set the --e-value-search option is used for all profiles regardless the presence of the a GA bit score in the profiles. (default: {not msf_def['cut_ga']})""") cut_ga_group.add_argument('--cut-ga', action='store_true', default=None, help=f"""By default the MSF try to applied a threshold per profile by using the hmmer -cut-ga option. This is possible only if the GA bit score is present in the profile otherwise MSF switch to use the --e-value-search (-E in hmmsearch). But the modeler can override this default behavior to do not use cut_ga but --e-value-search instead (-E in hmmsearch). The user can reestablish the general MSF behavior, be sure the profiles contain the GA bit score. (default: {msf_def['cut_ga']})""") hmmer_options.add_argument('--i-evalue-sel', action='store', type=float, default=None, help=f"""Maximal independent e-value for Hmmer hits to be selected for systems detection. (default:{msf_def['i_evalue_sel']})""") hmmer_options.add_argument('--coverage-profile', action='store', type=float, default=None, help=f"""Minimal profile coverage required for the hit alignment with the profile to allow the hit selection for systems detection. (default: {msf_def['coverage_profile']})""") score_options = parser.add_argument_group(title="Score options", description="Options for cluster and systems scoring") score_options.add_argument('--mandatory-weight', action='store', type=float, default=None, help=f"""the weight of a mandatory component in cluster scoring (default:{msf_def['mandatory_weight']})""") score_options.add_argument('--accessory-weight', action='store', type=float, default=None, help=f"""the weight of a accessory component in cluster scoring (default:{msf_def['accessory_weight']})""") # the weight of a mandatory component in cluster scoring # (default:{msf_def['neutral_weight']}) score_options.add_argument('--neutral-weight', action='store', type=float, default=None, help=argparse.SUPPRESS) # the weight modifier for a component which code for itself cluster scoring # (default:{msf_def['itself_weight']})""" score_options.add_argument('--itself-weight', action='store', type=float, default=None, help=argparse.SUPPRESS) score_options.add_argument('--exchangeable-weight', action='store', type=float, default=None, help=f"""the weight modifier for a component which code for exchangeable cluster scoring (default:{msf_def['exchangeable_weight']})""") score_options.add_argument('--redundancy-penalty', action='store', type=float, default=None, help=f"""the weight modifier for cluster which bring a component already presents in other clusters (default:{msf_def['redundancy_penalty']})""") score_options.add_argument('--out-of-cluster', action='store', type=float, default=None, help=f"""the weight modifier for a hit which is a - true loner (not in cluster) - or multi-system (from an other system) (default:{msf_def['out_of_cluster_weight']})""") dir_options = parser.add_argument_group(title="Path options", description=None) dir_options.add_argument('--models-dir', action='store', default=None, help="""Specifies the path to the models if the models are not installed 
in the canonical place. It gathers definitions (xml files) and HMM profiles arranged in a specific file structure. A directory with the name of the model with at least two directories 'profiles' - which contains HMM profiles for each gene components described in the systems' models 'models' - which contains either the XML files of models' definitions or subdirectories to organize the models in subsystems.""") dir_options.add_argument('-o', '--out-dir', action='store', default=None, help="""Path to the directory where to store output results. if out-dir is specified, res-search-dir will be ignored.""") dir_options.add_argument('--index-dir', action='store', default=None, help="Specifies the path to a directory to store/read the sequence index when the sequence-db dir is not writable.") dir_options.add_argument('--res-search-suffix', action='store', default=None, help="The suffix to give to Hmmer raw output files. " f"(default: {msf_def['res_search_suffix']})") dir_options.add_argument('--res-extract-suffix', action='store', default=None, help="The suffix to give to filtered hits output files. " f"(default: {msf_def['res_extract_suffix']})") dir_options.add_argument('--profile-suffix', action='store', default=None, help=f"""The suffix of profile files. For each 'Gene' element, the corresponding profile is searched in the 'profile_dir', in a file which name is based on the Gene name + the profile suffix. For instance, if the Gene is named 'gspG' and the suffix is '.hmm3', then the profile should be placed at the specified location under the name 'gspG.hmm3' (default: {msf_def['profile_suffix']})""" ) general_options = parser.add_argument_group(title="General options", description=None) general_options.add_argument("-w", "--worker", action='store', type=int, default=None, help=f"""Number of workers to be used by MacSyFinder. In the case the user wants to run MacSyFinder in a multi-thread mode. 0 mean that all threads available will be used. (default: {msf_def['worker']})""" ) general_options.add_argument("-v", "--verbosity", action="count", default=0, help="""Increases the verbosity level. There are 4 levels: Error messages (default), Warning (-v), Info (-vv) and Debug.(-vvv)""") general_options.add_argument("--mute", action="store_true", default=False, help=f"""Mute the log on stdout. (continue to log on macsyfinder.log) (default: {msf_def['mute']})""") general_options.add_argument("--version", action="version", version=get_version_message()) general_options.add_argument("-l", "--list-models", action="store_true", default=False, help="Displays all models installed at generic location and quit.") general_options.add_argument("--cfg-file", action='store', help="Path to a MacSyFinder configuration file to be used. (conflict with --previous-run)") general_options.add_argument("--previous-run", action='store', default=None, help="""Path to a previous MacSyFinder run directory. It allows to skip the Hmmer search step on a same dataset, as it uses previous run results and thus parameters regarding Hmmer detection. The configuration file from this previous run will be used. Conflicts with options: --cfg-file, --sequence-db, --profile-suffix, --res-extract-suffix, --e-value-res, --db-type, --hmmer""") general_options.add_argument("--relative-path", action='store_true', default=False, help=argparse.SUPPRESS) # 'relative-path' option help message (currently hidden) # Use relative paths instead of absolute paths. 
This option is used # by developers to generate portable data set, as for example test # data set, which are used on many different machines (using previous-run option). parsed_args = parser.parse_args(args) if parsed_args.cfg_file and parsed_args.previous_run: # argparse does not allow to have mutually exclusive option in a argument group # I prefer to have these 2 options in general options group # so I mimic the exclusive_group behavior parser.print_usage() print("macsyfinder: error: argument --previous-run: not allowed with argument --cfg-file") sys.exit(2) return parser, parsed_args def search_systems(config, model_registry, models_def_to_detect, logger): """ Do the job, this function is the orchestrator of all the macsyfinder mechanics at the end several files are produced containing the results - macsyfinder.conf: The set of variables used to runt this job - macsyfinder.systems: The list of the potential systems - macsyfinder.rejected_cluster: The list of all clusters and clustrs combination which has been rejected and the reason - macsyfinder.log: the copy of the standard output :param config: The MacSyFinder Configuration :type config: :class:`macsypy.config.Config` object :param model_registry: the registry of all models :type model_registry: :class:`macsypy.registries.ModelRegistry` object :param models_def_to_detect: the defintions to detect :type models_def_to_detect: list of :class:`macsypy.registries.DefinitionLocation` objects :param logger: The logger use to display information to the user. It must be initialized. see :func:`macsypy.init_logger` :type logger: :class:`colorlog.Logger` object :return: the systems and rejected clusters found :rtype: ([:class:`macsypy.system.System`, ...], [:class:`macsypy.cluster.RejectedCAndidate`, ...]) """ working_dir = config.working_dir() config.save(path_or_buf=os.path.join(working_dir, config.cfg_name)) # build indexes idx = Indexes(config) idx.build(force=config.idx()) # create models model_bank = ModelBank() gene_bank = GeneBank() profile_factory = ProfileFactory(config) parser = DefinitionParser(config, model_bank, gene_bank, model_registry, profile_factory) parser.parse(models_def_to_detect) logger.info(f"MacSyFinder's results will be stored in working_dir{working_dir}") logger.info(f"Analysis launched on {config.sequence_db()} for model(s):") for model in models_def_to_detect: logger.info(f"\t- {model.fqn}") models_to_detect = [model_bank[model_loc.fqn] for model_loc in models_def_to_detect] all_genes = [] for model in models_to_detect: genes = model.mandatory_genes + model.accessory_genes + model.neutral_genes + model.forbidden_genes # Exchangeable (formerly homologs/analogs) are also added because they can "replace" an important gene... 
ex_genes = [] for m_gene in genes: ex_genes += m_gene.exchangeables all_genes += (genes + ex_genes) ############################################# # this part of code is executed in parallel ############################################# try: all_reports = search_genes(all_genes, config) except Exception as err: raise err sys.exit(str(err)) ############################################# # end of parallel code ############################################# all_hits = [hit for subl in [report.hits for report in all_reports] for hit in subl] if len(all_hits) > 0: # It's important to keep this sorting to have in last all_hits version # the hits with the same replicon_name and position sorted by score # the best score in first hits_by_replicon = {} for hit in all_hits: if hit.replicon_name in hits_by_replicon: hits_by_replicon[hit.replicon_name].append(hit) else: hits_by_replicon[hit.replicon_name] = [hit] for rep_name in hits_by_replicon: hits_by_replicon[rep_name] = get_best_hits(hits_by_replicon[rep_name], key='score') hits_by_replicon[rep_name].sort(key=attrgetter('position')) models_to_detect = sorted(models_to_detect, key=attrgetter('name')) db_type = config.db_type() if db_type in ('ordered_replicon', 'gembase'): systems, rejected_candidates = _search_in_ordered_replicon(hits_by_replicon, models_to_detect, config, logger) return systems, rejected_candidates elif db_type == "unordered": likely_systems, rejected_hits = _search_in_unordered_replicon(hits_by_replicon, models_to_detect, logger) return likely_systems, rejected_hits else: assert False, f"dbtype have an invalid value {db_type}" else: # No hits detected return [], [] def _search_in_ordered_replicon(hits_by_replicon, models_to_detect, config, logger): """ :param hits_by_replicon: :param models_to_detect: :param config: :param logger: :return: """ all_systems = [] all_rejected_candidates = [] rep_db = RepliconDB(config) for rep_name in hits_by_replicon: logger.info(f"\n{f' Hits analysis for replicon {rep_name} ':#^60}") rep_info = rep_db[rep_name] for model in models_to_detect: one_model_systems = [] one_model_rejected_candidates = [] logger.info(f"Check model {model.fqn}") # model.filter filter hit but also cast them in ModelHit mhits_related_one_model = model.filter(hits_by_replicon[rep_name]) logger.debug(f"{f' hits related to {model.name} ':#^80}") hit_header_str = "id\trep_name\tpos\tseq_len\tgene_name\ti_eval\tscore\tprofile_cov\tseq_cov\tbeg_match\tend_match" hits_str = "".join([str(h) for h in mhits_related_one_model]) logger.debug(f"\n{hit_header_str}\n{hits_str}") logger.debug("#" * 80) logger.info("Building clusters") hit_weights = HitWeight(**config.hit_weights()) true_clusters, true_loners = cluster.build_clusters(mhits_related_one_model, rep_info, model, hit_weights) logger.debug(f"{' CLUSTERS ':#^80}") logger.debug("\n" + "\n".join([str(c) for c in true_clusters])) logger.debug(f"{' LONERS ':=^50}") logger.debug("\n" + "\n".join([str(c) for c in true_loners.values() if c.loner])) # logger.debug("{:=^50}".format(" MULTI-SYSTEMS hits ")) # logger.debug("\n" + "\n".join([str(c.hits[0]) for c in special_clusters.values() if c.multi_system])) logger.debug("#" * 80) logger.info("Searching systems") clusters_combination = combine_clusters(true_clusters, true_loners, multi_loci=model.multi_loci) for one_clust_combination in clusters_combination: ordered_matcher = OrderedMatchMaker(model, redundancy_penalty=config.redundancy_penalty()) res = ordered_matcher.match(one_clust_combination) if isinstance(res, System): 
one_model_systems.append(res) else: one_model_rejected_candidates.append(res) ############################### # MultiSystem Hits Management # ############################### # get multi systems from existing systems # hit_encondig_multisystems = set() # for the same model (in the loop) for one_sys in one_model_systems: hit_encondig_multisystems.update(one_sys.get_hits_encoding_multisystem()) logger.debug(f"{' MultiSystems ':#^80}") logger.debug("\n" + "\n".join([str(c) for c in true_clusters])) # Cast these hits in MultiSystem/LonerMultiSystem multi_systems_hits = [] for hit in hit_encondig_multisystems: if not hit.loner: multi_systems_hits.append(MultiSystem(hit)) else: multi_systems_hits.append(LonerMultiSystem(hit)) # choose the best one ms_per_function = sort_model_hits(multi_systems_hits) best_ms = compute_best_MSHit(ms_per_function) # check if among rejected clusters with the MS, they can be created a new system best_ms = [Cluster([ms], model, hit_weights) for ms in best_ms] new_clst_combination = combine_multisystems(one_model_rejected_candidates, best_ms) for one_clust_combination in new_clst_combination: ordered_matcher = OrderedMatchMaker(model, redundancy_penalty=config.redundancy_penalty()) res = ordered_matcher.match(one_clust_combination) if isinstance(res, System): one_model_systems.append(res) else: one_model_rejected_candidates.append(res) all_systems.extend(one_model_systems) all_rejected_candidates.extend(one_model_rejected_candidates) if all_systems: all_systems.sort(key=lambda syst: (syst.replicon_name, syst.position[0], syst.model.fqn, - syst.score)) if not rep_db.guess_if_really_gembase(): _log.warning( f"Most of replicons contains only ONE sequence are you sure that '{config.sequence_db()}' is a 'gembase'.") return all_systems, all_rejected_candidates def _search_in_unordered_replicon(hits_by_replicon, models_to_detect, logger): """ :param hits_by_replicon: :param models_to_detect: :param logger: :return: """ likely_systems = [] rejected_hits = [] for rep_name in hits_by_replicon: logger.info(f"\n{f' Hits analysis for replicon {rep_name} ':#^60}") for model in models_to_detect: logger.info(f"Check model {model.fqn}") hits_related_one_model = model.filter(hits_by_replicon[rep_name]) logger.debug("{:#^80}".format(" hits related to {} \n".format(model.name))) logger.debug("id\trep_name\tpos\tseq_len\tgene_name\ti_eval\tscore\tprofile_cov\tseq_cov\tbeg_match\tend_match") logger.debug("".join([str(h) for h in hits_related_one_model])) logger.debug("#" * 80) logger.info("Searching systems") hits_related_one_model = model.filter(hits_by_replicon[rep_name]) if hits_related_one_model: unordered_matcher = UnorderedMatchMaker(model) res = unordered_matcher.match(hits_related_one_model) if isinstance(res, LikelySystem): likely_systems.append(res) elif isinstance(res, UnlikelySystem): rejected_hits.append(res) else: logger.info(f"No hits related to {model.fqn } found.") else: logger.info(f"No hits found for model {model.fqn}") if likely_systems: likely_systems.sort(key=lambda syst: (syst.replicon_name, syst.position[0], syst.model.fqn)) return likely_systems, rejected_hits def _outfile_header(models_fam_name, models_version): """ :return: The 2 first lines of each result file :rtype: str """ header = f"""# macsyfinder {macsypy.__version__} # models : {models_fam_name}-{models_version} # {' '.join(sys.argv)}""" return header def systems_to_tsv(models_fam_name, models_version, systems, hit_system_tracker, sys_file): """ print systems occurrences in a file in tabulated format 
:param systems: list of systems found :type systems: list of :class:`macsypy.system.System` objects :param hit_system_tracker: a filled HitSystemTracker. :type hit_system_tracker: :class:`macsypy.system.HitSystemTracker` object :param sys_file: The file where to write down the systems occurrences :type sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if systems: print("# Systems found:", file=sys_file) print(TsvSystemSerializer.header, file=sys_file) for system in systems: sys_serializer = TsvSystemSerializer() print(sys_serializer.serialize(system, hit_system_tracker), file=sys_file) warnings = _loner_warning(systems) if warnings: print("\n".join(warnings), file=sys_file) else: print("# No Systems found", file=sys_file) def systems_to_txt(models_fam_name, models_version, systems, hit_system_tracker, sys_file): """ print systems occurrences in a file in human readable format :param systems: list of systems found :type systems: list of :class:`macsypy.system.System` objects :param hit_system_tracker: a filled HitSystemTracker. :type hit_system_tracker: :class:`macsypy.system.HitSystemTracker` object :param sys_file: The file where to write down the systems occurrences :type sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if systems: print("# Systems found:\n", file=sys_file) for system in systems: sys_serializer = TxtSystemSerializer() print(sys_serializer.serialize(system, hit_system_tracker), file=sys_file) print("=" * 60, file=sys_file) warnings = _loner_warning(systems) if warnings: print("\n".join(warnings), file=sys_file) else: print("# No Systems found", file=sys_file) def solutions_to_tsv(models_fam_name, models_version, solutions, hit_system_tracker, sys_file): """ print solution in a file in tabulated format A solution is a set of systems which represents an optimal combination of systems to maximize the score. :param solutions: list of systems found :type solutions: list of list of :class:`macsypy.system.System` objects :param hit_system_tracker: a filled HitSystemTracker. 
:type hit_system_tracker: :class:`macsypy.system.HitSystemTracker` object :param sys_file: The file where to write down the systems occurrences :type sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if solutions: sol_serializer = TsvSolutionSerializer() print("# Systems found:", file=sys_file) print(sol_serializer.header, file=sys_file) for sol_id, solution in enumerate(solutions, 1): print(sol_serializer.serialize(solution, sol_id, hit_system_tracker), file=sys_file, end='') warnings = _loner_warning(solution.systems) if warnings: print("\n".join(warnings) + "\n", file=sys_file) else: print("# No Systems found", file=sys_file) def _loner_warning(systems): """ :param systems: sequence of systems :return: warning for loner which have less occurrences than systems occurrences in which this lone is used except if the loner is also multi system :rtype: list of string """ warnings = [] loner_tracker = {} for syst in systems: loners = syst.get_loners() for loner in loners: if loner.multi_system: # the loner multi_systems can appear in several systems continue elif loner in loner_tracker: loner_tracker[loner].append(syst) else: loner_tracker[loner] = [syst] for loner, systs in loner_tracker.items(): if len(loner) < len(systs): # len(loners) count the number of loner occurrence the loner and its counterpart warnings.append(f"# WARNING Loner: there is only {len(loner)} occurrence(s) of loner '{loner.gene.name}' " f"and {len(systs)} potential systems [{', '.join([s.id for s in systs])}]") return warnings def summary_best_solution(models_fam_name, models_version, best_solution_path, sys_file, models_fqn, replicon_names): """ do a summary of best_solution in best_solution_path and write it on out_path a summary compute the number of system occurrence for each model and each replicon .. code-block:: text replicon model_fqn_1 model_fqn_2 .... 
rep_name_1 1 2 rep_name_2 2 0 columns are separated by \t character :param str best_solution_path: the path to the best_solution file in tsv format :param sys_file: the file where to save the summary :param models_fqn: the fully qualified names of the models :type models_fqn: list of string :param replicon_names: the name of the replicons used :type replicon_names: list of string """ print(_outfile_header(models_fam_name, models_version), file=sys_file) def fill_replicon(summary): index_name = summary.index.name computed_replicons = set(summary.index) lacking_replicons = set(replicon_names) - computed_replicons lacking_replicons = sorted(lacking_replicons) rows = pd.DataFrame({models: [0 * len(lacking_replicons)] for models in summary.columns}, index=lacking_replicons) summary = pd.concat([summary, rows], ignore_index=False) summary.index.name = index_name return summary def fill_models(summary): computed_models = set(summary.columns) lacking_models = set(models_fqn) - computed_models lacking_models = sorted(lacking_models) for model in lacking_models: summary[model] = [0 for _ in summary.index] return summary try: best_solution = pd.read_csv(best_solution_path, sep='\t', comment='#') except pd.errors.EmptyDataError: summary = pd.DataFrame(0, index=replicon_names, columns=models_fqn) summary.index.name = 'replicon' else: selection = best_solution[['replicon', 'sys_id', 'model_fqn']] dropped = selection.drop_duplicates(subset=['replicon', 'sys_id']) summary = pd.crosstab(index=dropped.replicon, columns=dropped['model_fqn']) summary = fill_replicon(summary) summary = fill_models(summary) summary.to_csv(sys_file, sep='\t') def loners_to_tsv(models_fam_name, models_version, systems, sys_file): """ get loners from valid systems and save them on file :param systems: the systems from which the loners are extract :type systems: list of :class:`macsypy.system.System` object :param sys_file: the file where loners are saved :type sys_file: file object open in write mode """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if systems: best_loners = set() for syst in systems: best_loners.update(syst.get_loners()) if best_loners: serializer = TsvSpecialHitSerializer() loners = serializer.serialize(best_loners) print("# Loners found:", file=sys_file) print(loners, file=sys_file) else: print("# No Loners found", file=sys_file) else: print("# No Loners found", file=sys_file) def multisystems_to_tsv(models_fam_name, models_version, systems, sys_file): """ get multisystems from valid systems and save them on file :param systems: the systems from which the loners are extract :type systems: list of :class:`macsypy.system.System` object :param sys_file: the file where multisystems are saved :type sys_file: file object open in write mode """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if systems: best_multisystems = set() for syst in systems: best_multisystems.update(syst.get_multisystems()) if best_multisystems: serializer = TsvSpecialHitSerializer() multisystems = serializer.serialize(best_multisystems) print("# Multisystems found:", file=sys_file) print(multisystems, file=sys_file) else: print("# No Multisystems found", file=sys_file) else: print("# No Multisystems found", file=sys_file) def rejected_candidates_to_txt(models_fam_name, models_version, rejected_candidates, cand_file): """ print rejected clusters in a file :param rejected_candidates: list of candidates which does not contitute a system :type rejected_candidates: list of 
:class:`macsypy.system.RejectedCandidate` objects :param cand_file: The file where to write down the rejected candidates :type cand_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=cand_file) if rejected_candidates: print("# Rejected candidates:\n", file=cand_file) for rej_cand in rejected_candidates: print(rej_cand, file=cand_file, end='') print("=" * 60, file=cand_file) else: print("# No Rejected candidates", file=cand_file) def rejected_candidates_to_tsv(models_fam_name, models_version, rejected_candidates, cand_file): """ """ print(_outfile_header(models_fam_name, models_version), file=cand_file) if rejected_candidates: serializer = TsvRejectedCandidatesSerializer() rej_candidates = serializer.serialize(rejected_candidates) print("# Rejected candidates found:", file=cand_file) print(rej_candidates, file=cand_file, end='') else: print("# No Rejected candidates", file=cand_file) def likely_systems_to_txt(models_fam_name, models_version, likely_systems, hit_system_tracker, sys_file): """ print likely systems occurrences (from unordered replicon) in a file in text human readable format :param likely_systems: list of systems found :type likely_systems: list of :class:`macsypy.system.LikelySystem` objects :param hit_system_tracker: a filled HitSystemTracker. :type hit_system_tracker: :class:`macsypy.system.HitSystemTracker` object :param sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if likely_systems: print("# Systems found:\n", file=sys_file) for system in likely_systems: sys_serializer = TxtLikelySystemSerializer() print(sys_serializer.serialize(system, hit_system_tracker), file=sys_file) else: print("# No Likely Systems found", file=sys_file) def likely_systems_to_tsv(models_fam_name, models_version, likely_systems, hit_system_tracker, sys_file): """ print likely systems occurrences (from unordered replicon) in a file in tabulated separeted value (tsv) format :param likely_systems: list of systems found :type likely_systems: list of :class:`macsypy.system.LikelySystem` objects :param hit_system_tracker: a filled HitSystemTracker. 
:type hit_system_tracker: :class:`macsypy.system.HitSystemTracker` object :param sys_file: The file where to write down the systems occurrences :type sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if likely_systems: print("# Likely Systems found:\n", file=sys_file) print(TsvLikelySystemSerializer.header, file=sys_file) for l_system in likely_systems: sys_serializer = TsvLikelySystemSerializer() print(sys_serializer.serialize(l_system, hit_system_tracker), file=sys_file) else: print("# No Likely Systems found", file=sys_file) def unlikely_systems_to_txt(models_fam_name, models_version, unlikely_systems, sys_file): """ print hits (from unordered replicon) which probably does not make a system occurrences in a file in human readable format :param unlikely_systems: list of :class:`macsypy.system.UnLikelySystem` objects :param sys_file: The file where to write down the systems occurrences :type sys_file: file object :return: None """ print(_outfile_header(models_fam_name, models_version), file=sys_file) if unlikely_systems: print("# Unlikely Systems found:\n", file=sys_file) for system in unlikely_systems: sys_serializer = TxtUnikelySystemSerializer() print(sys_serializer.serialize(system), file=sys_file) print("=" * 60, file=sys_file) else: print("# No Unlikely Systems found", file=sys_file) def main(args=None, loglevel=None): """ main entry point to MacSyFinder do some check before to launch :func:`main_search_systems` which is the real function that perform a search :param args: the arguments passed on the command line without the program name :type args: List of string :param loglevel: the output verbosity :type loglevel: a positive int or a string among 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL' """ args = sys.argv[1:] if args is None else args parser, parsed_args = parse_args(args) defaults = MacsyDefaults() config = Config(defaults, parsed_args) if parsed_args.list_models: print(list_models(parsed_args), file=sys.stdout) sys.exit(0) ########################### # creation of working dir ########################### working_dir = config.working_dir() if not os.path.exists(working_dir): os.makedirs(working_dir) else: if os.path.isdir(working_dir): if os.listdir(working_dir): raise ValueError(f"'{working_dir}' already exists and is not a empty") else: raise ValueError(f"'{working_dir}' already exists and is not a directory") ################ # init loggers # ################ macsypy.init_logger(log_file=os.path.join(config.working_dir(), config.log_file()), out=not config.mute()) if not loglevel: # logs are specify from args options macsypy.logger_set_level(level=config.log_level()) else: # used by unit tests to mute or unmute logs macsypy.logger_set_level(level=loglevel) logger = logging.getLogger('macsypy.macsyfinder') if not parsed_args.previous_run and not parsed_args.models: parser.print_help() print() sys.tracebacklimit = 0 raise OptionError("argument --models or --previous-run is required.") elif not parsed_args.previous_run and not parsed_args.sequence_db: parser.print_help() print() sys.tracebacklimit = 0 raise OptionError("argument --sequence-db or --previous-run is required.") elif not parsed_args.previous_run and not parsed_args.db_type: parser.print_help() print() sys.tracebacklimit = 0 raise OptionError("argument --db-type or --previous-run is required.") ############################# # command seems Ok Let's go # ############################# _log.info(get_version_message()) _log.info(f"command used: {' 
'.join(sys.argv)}") ######################################## # compute which model I have to search # ######################################## model_registry = ModelRegistry() for model_dir in config.models_dir(): try: models_loc_available = scan_models_dir(model_dir, profile_suffix=config.profile_suffix(), relative_path=config.relative_path()) for model_loc in models_loc_available: model_registry.add(model_loc) except PermissionError as err: _log.warning(f"{model_dir} is not readable: {err} : skip it.") try: models_def_to_detect, models_fam_name, models_version = get_def_to_detect(config.models(), model_registry) except KeyError as err: sys.exit(f"macsyfinder: {err}") _log.info(f"\nmodels used: {models_fam_name}-{models_version}") logger.info(f"\n{f' Searching systems ':#^70}") all_systems, rejected_candidates = search_systems(config, model_registry, models_def_to_detect, logger) track_multi_systems_hit = HitSystemTracker(all_systems) if config.db_type() in ('gembase', 'ordered_replicon'): ############################# # Ordered/Gembase replicons # ############################# ########################### # select the best systems # ########################### logger.info(f"\n{f' Computing best solutions ':#^70}") all_best_solutions = [] one_best_solution = [] # group systems found by replicon # before to search best system combination import time for rep_name, syst_group in itertools.groupby(all_systems, key=lambda s: s.replicon_name): syst_group = list(syst_group) logger.info(f"Computing best solutions for {rep_name} (nb of candidate systems {len(syst_group)})") find_best_solutions_start = time.perf_counter() best_sol_4_1_replicon, score = find_best_solutions(syst_group) find_best_solutions_stop = time.perf_counter() logger.info(f"It took {find_best_solutions_stop - find_best_solutions_start:.2f}sec to find best solution" f" ({score:.2f}) for replicon {rep_name}") # if several solutions are equivalent same number of system and score is same # store all equivalent solution in all_best_solution => all_best_systems # pick one in one_best_solution => best_systems all_best_solutions.extend(best_sol_4_1_replicon) one_best_solution.append(best_sol_4_1_replicon[0]) ############################## # Write the results in files # ############################## logger.info(f"""\n{f" Writing down results in '{os.path.basename(config.working_dir())}' ":#^70}""") system_filename = os.path.join(config.working_dir(), "all_systems.txt") tsv_filename = os.path.join(config.working_dir(), "all_systems.tsv") with open(system_filename, "w") as sys_file: systems_to_txt(models_fam_name, models_version, all_systems, track_multi_systems_hit, sys_file) with open(tsv_filename, "w") as tsv_file: systems_to_tsv(models_fam_name, models_version, all_systems, track_multi_systems_hit, tsv_file) cluster_filename = os.path.join(config.working_dir(), "rejected_candidates.txt") with open(cluster_filename, "w") as clst_file: rejected_candidates.sort(key=lambda clst: (clst.replicon_name, clst.model, clst.hits)) rejected_candidates_to_txt(models_fam_name, models_version, rejected_candidates, clst_file) if not (all_systems or rejected_candidates): logger.info("No Systems found in this dataset.") cluster_filename = os.path.join(config.working_dir(), "rejected_candidates.tsv") with open(cluster_filename, "w") as clst_file: rejected_candidates_to_tsv(models_fam_name, models_version, rejected_candidates, clst_file) tsv_filename = os.path.join(config.working_dir(), "all_best_solutions.tsv") with open(tsv_filename, "w") as tsv_file: 
solutions_to_tsv(models_fam_name, models_version, all_best_solutions, track_multi_systems_hit, tsv_file) best_solution_filename = os.path.join(config.working_dir(), "best_solution.tsv") with open(best_solution_filename, "w") as best_solution_file: one_best_solution = [syst for sol in one_best_solution for syst in sol] one_best_solution.sort(key=lambda syst: (syst.replicon_name, syst.position[0], syst.model.fqn, - syst.score)) systems_to_tsv(models_fam_name, models_version, one_best_solution, track_multi_systems_hit, best_solution_file) loners_filename = os.path.join(config.working_dir(), "best_solution_loners.tsv") with open(loners_filename, "w") as loners_file: loners_to_tsv(models_fam_name, models_version, one_best_solution, loners_file) multisystems_filename = os.path.join(config.working_dir(), "best_solution_multisystems.tsv") with open(multisystems_filename, "w") as multisystems_file: multisystems_to_tsv(models_fam_name, models_version, one_best_solution, multisystems_file) summary_filename = os.path.join(config.working_dir(), "best_solution_summary.tsv") with open(summary_filename, "w") as summary_file: models_fqn = [m.fqn for m in models_def_to_detect] if config.db_type() == 'gembase': replicons_names = get_replicon_names(config.sequence_db()) else: # it's an ordered_replicon replicons_names = [RepliconDB.ordered_replicon_name] summary_best_solution(models_fam_name, models_version, best_solution_filename, summary_file, models_fqn, replicons_names) else: ####################### # Unordered replicons # ####################### ############################## # Write the results in files # ############################## logger.info(f"""\n{f" Writing down results in '{os.path.basename(config.working_dir())}' ":#^70}""") system_filename = os.path.join(config.working_dir(), "all_systems.txt") with open(system_filename, "w") as sys_file: likely_systems_to_txt(models_fam_name, models_version, all_systems, track_multi_systems_hit, sys_file) # forbidden = [s for s in all_systems if s.forbidden_occ] # system_filename = os.path.join(config.working_dir(), "forbidden_components.tsv") # with open(system_filename, "w") as sys_file: # likely_systems_to_tsv(forbidden, track_multi_systems_hit, sys_file) system_filename = os.path.join(config.working_dir(), "all_systems.tsv") with open(system_filename, "w") as sys_file: likely_systems_to_tsv(models_fam_name, models_version, all_systems, track_multi_systems_hit, sys_file) cluster_filename = os.path.join(config.working_dir(), "uncomplete_systems.txt") with open(cluster_filename, "w") as clst_file: unlikely_systems_to_txt(models_fam_name, models_version, rejected_candidates, clst_file) if not (all_systems or rejected_candidates): logger.info("No Systems found in this dataset.") logger.info("END") if __name__ == "__main__": main()
gpl-3.0
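The summary written by summary_best_solution above boils down to a pandas crosstab of system counts per replicon and per model, padded with zero rows and columns for replicons and models that yielded nothing. A minimal self-contained sketch of that pattern follows; the replicon and model names are illustrative, not taken from a real run.

import pandas as pd

best_solution = pd.DataFrame({
    'replicon': ['rep_1', 'rep_1', 'rep_2'],
    'sys_id': ['sys_1', 'sys_2', 'sys_3'],
    'model_fqn': ['TXSS/T2SS', 'TXSS/T4P', 'TXSS/T2SS'],
})

# one row per (replicon, system), then count occurrences of each model
dedup = best_solution.drop_duplicates(subset=['replicon', 'sys_id'])
summary = pd.crosstab(index=dedup['replicon'], columns=dedup['model_fqn'])

# pad replicons/models for which nothing was found, as fill_replicon/fill_models do
all_replicons = ['rep_1', 'rep_2', 'rep_3']
all_models = ['TXSS/T2SS', 'TXSS/T3SS', 'TXSS/T4P']
summary = summary.reindex(index=all_replicons, columns=all_models, fill_value=0)
summary.index.name = 'replicon'
print(summary.to_csv(sep='\t'))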
NickC1/skCCM
skccm/paper.py
2
9583
# # Data for analyzing causality. # By Nick Cortale # # Classes: # ccm # embed # # Paper: # Detecting Causality in Complex Ecosystems # George Sugihara et al. 2012 # # Thanks to Kenneth Ells and Dylan McNamara # # Notes: # Originally I thought this can be made way faster by only calculting the # distances once and then chopping it to a specific library length. It turns out # that calculating the distances is cheaper than filtering the indices. # import numpy as np from sklearn import neighbors from sklearn import metrics import skccm.utilities as ut import pandas as pd import time class CCM: """ Convergent cross mapping for two embedded time series """ def __init__(self, weights='exp', score_metric='corrcoef', verbose=False): """ Parameters ---------- weights : weighting scheme for predictions - exp : exponential weighting score : how to score the predictions -'score' -'corrcoef' verbose : prints out calculation status """ self.weights = weights self.score_metric = score_metric self.verbose = verbose def fit(self,X1,X2): """ Fit the training data for ccm. Creates seperate near neighbor regressors for X1 and X2 independently. X1 : embedded time series of shape (num_samps,embed_dim) X2 : embedded time series of shape (num_samps,embed_dim) near_neighs : string - 'sorround' : this is what the paper uses - 'all' : calculate the distance to all near neighbors """ # Save X1_train and X2_train for prediction later. Confusing, # but we need to make predictions about our testing set using these. self.X1 = X1 self.X2 = X2 #to sorround a point, there must be ndim + 1 points # we add two here because the closest neighbor is itself. so that is # going to be dropped. near_neighs = X1.shape[1] + 2 self.knn1 = neighbors.KNeighborsRegressor(near_neighs) self.knn2 = neighbors.KNeighborsRegressor(near_neighs) def predict_no_drop(self,lib_lengths): """ Make a prediction Parameters ---------- X1_test : test set X2_test : test set lib_lengths : list of library lengths to test """ X1_pred = [] X2_pred = [] for liblen in lib_lengths: x1_p = np.empty(self.X1.shape) x2_p = np.empty(self.X2.shape) #keep only the indices that are less than library length self.knn1.fit(self.X1[:liblen], self.X1[:liblen]) self.knn2.fit(self.X2[:liblen], self.X2[:liblen]) dist1,ind1 = self.knn1.kneighbors(self.X1) dist2,ind2 = self.knn2.kneighbors(self.X2) #drop indices and distances to themselves dist1 = dist1[:,1:] dist2 = dist2[:,1:] ind1 = ind1[:,1:] ind2 = ind2[:,1:] for j in range(self.X1.shape[1]): W1 = ut.exp_weight(dist1) W2 = ut.exp_weight(dist2) #flip the weights and indices x1_p[:, j] = np.sum(self.X1[ind2, j] * W2, axis=1) x2_p[:, j] = np.sum(self.X2[ind1, j] * W1, axis=1) X1_pred.append(x1_p) X2_pred.append(x2_p) self.X1_pred = X1_pred self.X2_pred = X2_pred return X1_pred, X2_pred def predict_drop_in_list(self,lib_lengths,emb_ind1,emb_ind2): """ Make a prediction, but the same indices cant be matched with each other. Parameters ---------- lib_lengths : library lengths to Test e_ind1 : indices of the first embed time series. e_ind2 : indices of the second embed time series. 
""" X1_pred = [] X2_pred = [] #need to reset the class ot use all neighbors so that the appropriate # neighbors can be dropped for each class self.knn1 = neighbors.KNeighborsRegressor(len(self.X1)) self.knn2 = neighbors.KNeighborsRegressor(len(self.X2)) self.knn1.fit(self.X1, self.X1) self.knn2.fit(self.X2, self.X2) dist1,ind1 = self.knn1.kneighbors(self.X1) dist2,ind2 = self.knn2.kneighbors(self.X2) #find the conflicting indices conf1 = ut.conflicting_indices(emb_ind1) conf2 = ut.conflicting_indices(emb_ind2) #throw out the indices that are in the embedding dist1, ind1 = ut.throw_out_nn_indices(dist1,ind1,conf1) dist2, ind2 = ut.throw_out_nn_indices(dist2,ind2,conf2) n_sorround = self.X1.shape[1] + 1 #flipping allows for a faster implentation as we can feed # ut.in_libary_len smaller and smaller arrays for liblen in lib_lengths: #keep only the indices that are less than library length #t0 = time.time() i_1, d_1 = ut.in_library_len_keep(ind1, dist1, liblen,n_sorround) i_2, d_2 = ut.in_library_len_keep(ind2, dist2, liblen,n_sorround) #t1 = time.time() #t0 = time.time() W1 = ut.exp_weight(d_1) W2 = ut.exp_weight(d_2) x1_p = np.empty(self.X1.shape) x2_p = np.empty(self.X2.shape) for j in range(self.X1.shape[1]): #flip the weights and indices x1_p[:, j] = np.sum(self.X1[i_2, j] * W2, axis=1) x2_p[:, j] = np.sum(self.X2[i_1, j] * W1, axis=1) #t1 = time.time() #print('second_loop:',np.around(t1-t0,4)) X1_pred.append(x1_p) X2_pred.append(x2_p) self.X1_pred = X1_pred self.X2_pred = X2_pred if self.verbose: print("predictions made") return X1_pred, X2_pred def score(self,how='corrcoef'): """ Evalulate the predictions. Calculates the skill down each column and averages them together to get the total skill. how : how to score the predictions -'score' -'corrcoef' """ num_preds = self.X1.shape[1] score_1 = [] score_2 = [] for x1_p, x2_p in zip(self.X1_pred, self.X2_pred): sc1 = np.empty(num_preds) sc2 = np.empty(num_preds) for ii in range(num_preds): p1 = x1_p[:,ii] p2 = x2_p[:,ii] if self.score_metric == 'score': sc1[ii] = ut.score(p1,self.X1[:,ii]) sc2[ii] = ut.score(p2,self.X2[:,ii]) if self.score_metric == 'corrcoef': sc1[ii] = ut.corrcoef(p1,self.X1[:,ii]) sc2[ii] = ut.corrcoef(p2,self.X2[:,ii]) score_1.append( np.mean(sc1) ) score_2.append( np.mean(sc2) ) return score_1, score_2 class Embed: def __init__(self,X): """ Parameters ---------- X : series or dataframe, """ if type(X) is pd.pandas.core.frame.DataFrame: self.df = X else: self.X = X def df_mutual_information(self,max_lag): """ Calculates the mutual information along each row of a time series. Ensure that the time series is continuous in time and sampled regularly. You can resample it hourly, daily, minutely etc. if needed. Parameters ---------- max_lag : int maximum amount to shift the time series Returns ------- mi : dataframe, shape(max_lag,num_cols) columns are the columns of the original dataframe with rows being the mutual information """ cols = self.df.columns mi = np.empty((max_lag, len(cols))) for i,col in enumerate(cols): self.X = self.df[col].values mi[:,i] = self.mutual_information(max_lag) mi = pd.DataFrame(mi,columns=cols) return mi def mutual_information(self,max_lag): """ Calculates the mutual information between the an unshifted time series and a shifted time series. Utilizes scikit-learn's implementation of the mutual information found in sklearn.metrics. 
Parameters ---------- max_lag : integer maximum amount to shift the time series Returns ------- m_score : 1-D array mutual information at between the unshifted time series and the shifted time series """ #number of bins - say ~ 20 pts / bin for joint distribution #and that at least 4 bins are required N = max(self.X.shape) num_bins = max(4.,np.floor(np.sqrt(N/20))) num_bins = int(num_bins) m_score = np.zeros((max_lag)) for jj in range(max_lag): lag = jj+1 ts = self.X[0:-lag] ts_shift = self.X[lag::] min_ts = np.min(self.X) max_ts = np.max(self.X)+.0001 #needed to bin them up bins = np.linspace(min_ts,max_ts,num_bins+1) bin_tracker = np.zeros_like(ts) bin_tracker_shift = np.zeros_like(ts_shift) for ii in range(num_bins): locs = np.logical_and( ts>=bins[ii], ts<bins[ii+1] ) bin_tracker[locs] = ii locs_shift = np.logical_and( ts_shift>=bins[ii], ts_shift<bins[ii+1] ) bin_tracker_shift[locs_shift]=ii m_score[jj] = metrics.mutual_info_score(bin_tracker,bin_tracker_shift) return m_score def embed_indices(self,lag,embed): """ Gets the indices of the embedded time series. This assumes that the time series is sequential. Non-sequential time series are currently not supported. Parameters ---------- lag : int lag values as calculated from the first minimum of the mutual info. embed : int embedding dimension, how many lag values to take """ tsize = self.X.shape[0] X = np.arange(0,tsize) t_iter = tsize-(lag*(embed-1)) features = np.zeros((t_iter,embed)) for ii in range(t_iter): end_val = ii+lag*(embed-1)+1 part = X[ii : end_val] features[ii,:] = part[::lag] return features def embed_vectors_1d(self,lag,embed): """ Embeds vectors from a one dimensional time series in m-dimensional space. Parameters ---------- lag : int lag values as calculated from the first minimum of the mutual info. embed : int embedding dimension, how many lag values to take Returns ------- features : array of shape [num_vectors,embed] A 2-D array containing all of the embedded vectors Example ------- X = [0,1,2,3,4,5,6,7,8,9,10] em = 3 lag = 2 predict=3 returns: features = [[0,2,4], [1,3,5], [2,4,6], [3,5,7]] """ tsize = self.X.shape[0] t_iter = tsize-(lag*(embed-1)) features = np.zeros((t_iter,embed)) for ii in range(t_iter): end_val = ii+lag*(embed-1)+1 part = self.X[ii : end_val] features[ii,:] = part[::lag] return features
mit
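The lag-embedding layout documented in Embed.embed_vectors_1d can be reproduced in a few lines of NumPy. The sketch below is an independent re-implementation, not the library's code; it matches the docstring's example (lag=2, embed=3 over 0..10).

import numpy as np

def embed_1d(x, lag, embed):
    # rows are [x[i], x[i + lag], ..., x[i + lag * (embed - 1)]]
    x = np.asarray(x)
    n_rows = x.shape[0] - lag * (embed - 1)
    return np.column_stack([x[j * lag: j * lag + n_rows] for j in range(embed)])

print(embed_1d(np.arange(11), lag=2, embed=3)[:4])
# -> [[0 2 4]
#     [1 3 5]
#     [2 4 6]
#     [3 5 7]]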
laszlocsomor/tensorflow
tensorflow/contrib/data/python/kernel_tests/sequence_dataset_op_test.py
17
11073
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the experimental input pipeline ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base from tensorflow.contrib.data.python.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class SequenceDatasetTest(test.TestCase): def testRepeatTensorDataset(self): """Test a dataset that repeats its input multiple times.""" components = (np.array(1), np.array([1, 2, 3]), np.array(37.0)) # This placeholder can be fed when dataset-definition subgraph # runs (i.e. `init_op` below) to configure the number of # repetitions used in a particular iterator. count_placeholder = array_ops.placeholder(dtypes.int64, shape=[]) iterator = (dataset_ops.Dataset.from_tensors(components) .repeat(count_placeholder).make_initializable_iterator()) init_op = iterator.initializer get_next = iterator.get_next() self.assertEqual([c.shape for c in components], [t.shape for t in get_next]) with self.test_session() as sess: # Test a finite repetition. sess.run(init_op, feed_dict={count_placeholder: 3}) for _ in range(3): results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Test a different finite repetition. sess.run(init_op, feed_dict={count_placeholder: 7}) for _ in range(7): results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Test an empty repetition. sess.run(init_op, feed_dict={count_placeholder: 0}) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Test an infinite repetition. # NOTE(mrry): There's not a good way to test that the sequence # actually is infinite. 
sess.run(init_op, feed_dict={count_placeholder: -1}) for _ in range(17): results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) def testTakeTensorDataset(self): components = (np.arange(10),) count_placeholder = array_ops.placeholder(dtypes.int64, shape=[]) iterator = (dataset_ops.Dataset.from_tensor_slices(components) .take(count_placeholder).make_initializable_iterator()) init_op = iterator.initializer get_next = iterator.get_next() self.assertEqual([c.shape[1:] for c in components], [t.shape for t in get_next]) with self.test_session() as sess: # Take fewer than input size sess.run(init_op, feed_dict={count_placeholder: 4}) for i in range(4): results = sess.run(get_next) self.assertAllEqual(results, components[0][i:i+1]) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Take more than input size sess.run(init_op, feed_dict={count_placeholder: 25}) for i in range(10): results = sess.run(get_next) self.assertAllEqual(results, components[0][i:i+1]) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Take all of input sess.run(init_op, feed_dict={count_placeholder: -1}) for i in range(10): results = sess.run(get_next) self.assertAllEqual(results, components[0][i:i+1]) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Take nothing sess.run(init_op, feed_dict={count_placeholder: 0}) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testSkipTensorDataset(self): components = (np.arange(10),) count_placeholder = array_ops.placeholder(dtypes.int64, shape=[]) iterator = (dataset_ops.Dataset.from_tensor_slices(components) .skip(count_placeholder).make_initializable_iterator()) init_op = iterator.initializer get_next = iterator.get_next() self.assertEqual([c.shape[1:] for c in components], [t.shape for t in get_next]) with self.test_session() as sess: # Skip fewer than input size, we should skip # the first 4 elements and then read the rest. sess.run(init_op, feed_dict={count_placeholder: 4}) for i in range(4, 10): results = sess.run(get_next) self.assertAllEqual(results, components[0][i:i+1]) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Skip more than input size: get nothing. sess.run(init_op, feed_dict={count_placeholder: 25}) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Skip exactly input size. sess.run(init_op, feed_dict={count_placeholder: 10}) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Set -1 for 'count': skip the entire dataset. 
sess.run(init_op, feed_dict={count_placeholder: -1}) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) # Skip nothing sess.run(init_op, feed_dict={count_placeholder: 0}) for i in range(0, 10): results = sess.run(get_next) self.assertAllEqual(results, components[0][i:i+1]) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testRepeatRepeatTensorDataset(self): """Test the composition of repeat datasets.""" components = (np.array(1), np.array([1, 2, 3]), np.array(37.0)) inner_count = array_ops.placeholder(dtypes.int64, shape=[]) outer_count = array_ops.placeholder(dtypes.int64, shape=[]) iterator = (dataset_ops.Dataset.from_tensors(components).repeat(inner_count) .repeat(outer_count).make_initializable_iterator()) init_op = iterator.initializer get_next = iterator.get_next() self.assertEqual([c.shape for c in components], [t.shape for t in get_next]) with self.test_session() as sess: sess.run(init_op, feed_dict={inner_count: 7, outer_count: 14}) for _ in range(7 * 14): results = sess.run(get_next) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) def testRepeatEmptyDataset(self): """Test that repeating an empty dataset does not hang.""" iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10) .repeat(-1).make_initializable_iterator()) init_op = iterator.initializer get_next = iterator.get_next() with self.test_session() as sess: sess.run(init_op) with self.assertRaisesRegexp( errors.OutOfRangeError, "Attempted to repeat an empty dataset infinitely."): sess.run(get_next) class SequenceDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_skip_dataset(self, count): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).skip(count) def testSkipFewerThanInputs(self): count = 4 num_outputs = 10 - count self.run_core_tests(lambda: self._build_skip_dataset(count), lambda: self._build_skip_dataset(count + 2), num_outputs) def testSkipVarious(self): # Skip more than inputs self.run_core_tests(lambda: self._build_skip_dataset(20), None, 0) # Skip exactly the input size self.run_core_tests(lambda: self._build_skip_dataset(10), None, 0) self.run_core_tests(lambda: self._build_skip_dataset(-1), None, 0) # Skip nothing self.run_core_tests(lambda: self._build_skip_dataset(0), None, 10) def _build_take_dataset(self, count): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).take(count) def testTakeFewerThanInputs(self): count = 4 self.run_core_tests( lambda: self._build_take_dataset(count), lambda: self._build_take_dataset(count + 2), count, ) def testTakeVarious(self): # Take more than inputs self.run_core_tests(lambda: self._build_take_dataset(20), None, 10) # Take exactly the input size self.run_core_tests(lambda: self._build_take_dataset(10), None, 10) # Take all self.run_core_tests(lambda: self._build_take_dataset(-1), None, 10) # Take nothing self.run_core_tests(lambda: self._build_take_dataset(0), None, 0) def _build_repeat_dataset(self, count, take_count=3): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).take( take_count).repeat(count) def testFiniteRepeat(self): count = 10 self.run_core_tests(lambda: self._build_repeat_dataset(count), lambda: self._build_repeat_dataset(count + 2), 3 * count) def testEmptyRepeat(self): self.run_core_tests(lambda: 
self._build_repeat_dataset(0), None, 0) def testInfiniteRepeat(self): self.verify_unused_iterator( lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False) self.verify_init_before_restore( lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False) self.verify_multiple_breaks( lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False) self.verify_reset_restored_iterator( lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False) self.verify_restore_in_modified_graph( lambda: self._build_repeat_dataset(-1), lambda: self._build_repeat_dataset(2), 20, verify_exhausted=False) # Test repeat empty dataset self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), None, 0) if __name__ == "__main__": test.main()
apache-2.0
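The count conventions these tests pin down (take(-1) keeps everything, skip(-1) drops everything, repeat composes multiplicatively) are quicker to see with the eager tf.data API; the sketch below assumes TF 2.x rather than the contrib iterator API exercised above.

import tensorflow as tf

ds = tf.data.Dataset.range(10)

print(list(ds.take(4).as_numpy_iterator()))            # [0, 1, 2, 3]
print(list(ds.take(-1).as_numpy_iterator()))           # take(-1) keeps the whole dataset
print(list(ds.skip(25).as_numpy_iterator()))           # skipping past the end yields []
print(list(ds.skip(-1).as_numpy_iterator()))           # skip(-1) skips the entire dataset
print(list(ds.take(3).repeat(2).as_numpy_iterator()))  # [0, 1, 2, 0, 1, 2]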
scr4t/rep
rep/report/regression.py
3
5566
""" This file contains report class for regression estimators. Report includes: * features scatter plots, correlations * learning curve * feature importance * feature importance by shuffling the feature column All methods return objects, which can have plot method (details see in :class:`rep.plotting`) """ from __future__ import division, print_function, absolute_import from itertools import islice from collections import OrderedDict import itertools from sklearn.metrics import mean_squared_error from .. import plotting from ..utils import get_columns_dict from ._base import AbstractReport from ..estimators.interface import Regressor __author__ = 'Alex Rogozhnikov, Tatiana Likhomanenko' class RegressionReport(AbstractReport): """ Report simplifies comparison of regressors on the same dataset. Parameters: ----------- :param regressors: OrderedDict with regressors (RegressionFactory) :type regressors: dict[str, Regressor] :param LabeledDataStorage lds: data """ def __init__(self, regressors, lds): for name, regressor in regressors.items(): assert isinstance(regressor, Regressor), "Object {} doesn't implement interface".format(name) AbstractReport.__init__(self, lds=lds, estimators=regressors) def _predict(self, estimator, X): return estimator.predict(X) def scatter(self, correlation_pairs, mask=None, marker_size=20, alpha=0.1, grid_columns=2): """ Correlation between pairs of features :param list[tuple] correlation_pairs: pairs of features along which scatter plot will be build. :param mask: mask for data, which will be used :type mask: None or array-like or str or function(pandas.DataFrame) :param int marker_size: size of marker for each event on the plot :param float alpha: blending parameter for scatter :param int grid_columns: count of columns in grid :rtype: plotting.GridPlot """ features = list(set(itertools.chain.from_iterable(correlation_pairs))) _, df, = self._apply_mask(mask, self._get_features(features)) correlation_plots = self._scatter_addition(df, correlation_pairs, marker_size=marker_size, alpha=alpha) return plotting.GridPlot(grid_columns, *correlation_plots) def predictions_scatter(self, features=None, mask=None, marker_size=20, alpha=0.1, grid_columns=2): """ Correlation between predictions and features :param features: using features (if None then use classifier's features) :type features: None or list[str] :param mask: mask for data, which will be used :type mask: None or array-like or str or function(pandas.DataFrame) :param int marker_size: size of marker for each event on the plot :param float alpha: blending parameter for scatter :param int grid_columns: count of columns in grid :rtype: plotting.GridPlot """ features = self.common_features if features is None else features mask, df, = self._apply_mask(mask, self._get_features(features)) correlation_plots = [] for name, prediction in self.prediction.items(): correlation_pairs = [(feature, name) for feature in features] df[name] = prediction[mask] correlation_plots += self._scatter_addition(df, correlation_pairs, marker_size=marker_size, alpha=alpha) return plotting.GridPlot(grid_columns, *correlation_plots) def _scatter_addition(self, df, correlation_pairs, marker_size=20, alpha=0.1): correlation_plots = [] corr_pairs = OrderedDict() for feature1_c, feature2_c in correlation_pairs: feature1, feature2 = get_columns_dict([feature1_c, feature2_c]).keys() corr_pairs[(feature1, feature2)] = (df[feature1].values, df[feature2].values) plot_fig = plotting.ScatterPlot({'correlation': corr_pairs[(feature1, feature2)]}, 
alpha=alpha, size=marker_size) plot_fig.xlabel = feature1 plot_fig.ylabel = feature2 plot_fig.figsize = (8, 6) correlation_plots.append(plot_fig) return correlation_plots def _learning_curve_additional(self, name, metric_func, step, mask): """Returns values of roc curve for particular classifier, mask and metric function. """ _, data, labels, weight = self._apply_mask( mask, self._get_features(), self.target, self.weight) curve = OrderedDict() stage_values = self.estimators[name].staged_predict(data) for stage, prediction in islice(enumerate(stage_values), step - 1, None, step): curve[stage] = metric_func(labels, prediction, sample_weight=weight) return curve.keys(), curve.values() def feature_importance_shuffling(self, metric=mean_squared_error, mask=None, grid_columns=2): """ Get features importance using shuffling method (apply random permutation to one particular column) :param metric: function to measure quality function(y_true, y_predicted, sample_weight=None) :param mask: mask which points we should compare on :type mask: None or numbers.Number or array-like or str or function(pandas.DataFrame) :param int grid_columns: number of columns in grid :rtype: plotting.GridPlot """ return self._feature_importance_shuffling(metric=metric, mask=mask, grid_columns=grid_columns)
apache-2.0
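feature_importance_shuffling above scores a feature by permuting its column and measuring how much the regression metric degrades. Below is a standalone sketch of the same idea on synthetic data; the estimator, dataset and metric are placeholders, not the report's internals.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error

X, y = make_regression(n_samples=300, n_features=5, n_informative=2, random_state=0)
model = RandomForestRegressor(n_estimators=50, random_state=0).fit(X, y)

rng = np.random.default_rng(0)
baseline = mean_squared_error(y, model.predict(X))
for j in range(X.shape[1]):
    X_perm = X.copy()
    rng.shuffle(X_perm[:, j])          # break the link between feature j and the target
    degraded = mean_squared_error(y, model.predict(X_perm))
    print('feature %d: MSE increase %.2f' % (j, degraded - baseline))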
Y-oHr-N/DocumentFilter
gbssl/laplacian_rls.py
2
3601
import numpy as np import scipy.sparse as sp import scipy.linalg as LA from sklearn.base import BaseEstimator from sklearn.metrics.pairwise import rbf_kernel from .base import MRBinaryClassifierMixin from .multiclass import SemiSupervisedOneVsRestClassifier class BinaryLapRLSC(BaseEstimator, MRBinaryClassifierMixin): """Laplacian Regularized Least Squares Classifier.""" def fit(self, X, y, L): """Fit the model according to the given training data. Prameters --------- X : array-like, shpae = [n_samples, n_features] Training data. y : array-like, shpae = [n_samples] Target values (unlabeled points are marked as 0). L : array-like, shpae = [n_samples, n_samples] Graph Laplacian. """ labeled = y != 0 y_labeled = y[labeled] n_samples, n_features = X.shape n_labeled_samples = y_labeled.size I = sp.eye(n_samples) J = sp.diags(labeled.astype(np.float64)) K = rbf_kernel(X, gamma=self.gamma_k) M = J @ K \ + self.gamma_a * n_labeled_samples * I \ + self.gamma_i * n_labeled_samples / n_samples**2 * L**self.p @ K # Train a classifer self.dual_coef_ = LA.solve(M, y) return self class LapRLSC(SemiSupervisedOneVsRestClassifier): """Laplacian Regularized Least Squares Classifier. Parameters ---------- gamma_a : float Regularization parameter. gamma_i : float Smoothness regularization parameter. gamma_k : float Kernel coefficient. sparsify : {'kNN', 'MkNN', 'epsilonNN'} Graph sparsification type. n_neighbors : int > 0 Number of neighbors for each sample. radius : float Radius of neighborhoods. reweight: {'rbf', 'binary'} Edge re-weighting type. t : float Kernel coefficient. normed : boolean, dealut True If True, then compute normalized Laplacian. p : integer > 0 Degree of the graph Laplacian. Attributes ---------- X_ : array-like, shape = [n_samples, n_features] Training data. y_ : array-like, shape = [n_samples] Target values. classes_ : array-like, shpae = [n_classes] Class labels. A_ : array-like, shape = [n_samples, n_samples] Adjacency matrix. estimators_ : list of n_classes estimators Estimators used for predictions. label_binarizer_ : LabelBinarizer object Object used to transform multiclass labels to binary labels and vice-versa. References ---------- Mikhail Belkin, Partha Niyogi, Vikas Sindhwani, "On Manifold Regularization", AISTATS, 2005. """ def __init__( self, gamma_a = 1.0, gamma_i = 1.0, gamma_k = 1.0, sparsify = 'kNN', n_neighbors = 10, radius = 1.0, reweight = 'rbf', t = None, normed = True, p = 1 ): super(LapRLSC, self).__init__( estimator = BinaryLapRLSC(), sparsify = sparsify, n_neighbors = n_neighbors, radius = radius, reweight = reweight, t = t, normed = normed ) self.params = { 'gamma_a': gamma_a, 'gamma_i': gamma_i, 'gamma_k': gamma_k, 'p': p } self.estimator.set_params(**self.params)
mit
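Training in BinaryLapRLSC.fit is a single linear solve: (J K + gamma_a * l * I + gamma_i * l / n**2 * L**p K) alpha = y, where J masks the unlabeled points (their y is 0). A toy numerical sketch with p=1 follows; the graph construction and parameter values are illustrative, not the package's defaults.

import numpy as np
import scipy.linalg as LA
from scipy.sparse.csgraph import laplacian
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.neighbors import kneighbors_graph

rng = np.random.default_rng(0)
X = rng.normal(size=(60, 2))
y = np.zeros(60)
y[:10] = np.sign(X[:10, 0])                  # 10 labeled points (+1/-1), the rest marked 0

n, l = X.shape[0], 10
K = rbf_kernel(X, gamma=1.0)
W = kneighbors_graph(X, n_neighbors=5, mode='connectivity')
W = 0.5 * (W + W.T)                          # symmetrize before taking the Laplacian
L = laplacian(W, normed=True).toarray()
J = np.diag((y != 0).astype(float))

gamma_a, gamma_i = 1e-2, 1e-2
M = J @ K + gamma_a * l * np.eye(n) + gamma_i * l / n ** 2 * (L @ K)
alpha = LA.solve(M, y)
print(np.sign(K @ alpha)[:10])               # decision values on the labeled points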
thientu/scikit-learn
examples/model_selection/plot_roc.py
96
4487
""" ======================================= Receiver Operating Characteristic (ROC) ======================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. Multiclass settings ------------------- ROC curves are typically used in binary classification to study the output of a classifier. In order to extend ROC curve and ROC area to multi-class or multi-label classification, it is necessary to binarize the output. One ROC curve can be drawn per label, but one can also draw a ROC curve by considering each element of the label indicator matrix as a binary prediction (micro-averaging). Another evaluation measure for multi-class classification is macro-averaging, which gives equal weight to the classification of each label. .. note:: See also :func:`sklearn.metrics.roc_auc_score`, :ref:`example_model_selection_plot_roc_crossval.py`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.cross_validation import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) ############################################################################## # Plot of a ROC curve for a specific class plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() ############################################################################## # Plot ROC curves for the multiclass problem # Compute macro-average ROC curve and ROC area fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0) tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0) roc_auc["macro"] = 
auc(fpr["macro"], tpr["macro"]) plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), linewidth=2) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), linewidth=2) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show()
bsd-3-clause
Fireblend/scikit-learn
examples/model_selection/plot_roc.py
96
4487
""" ======================================= Receiver Operating Characteristic (ROC) ======================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. Multiclass settings ------------------- ROC curves are typically used in binary classification to study the output of a classifier. In order to extend ROC curve and ROC area to multi-class or multi-label classification, it is necessary to binarize the output. One ROC curve can be drawn per label, but one can also draw a ROC curve by considering each element of the label indicator matrix as a binary prediction (micro-averaging). Another evaluation measure for multi-class classification is macro-averaging, which gives equal weight to the classification of each label. .. note:: See also :func:`sklearn.metrics.roc_auc_score`, :ref:`example_model_selection_plot_roc_crossval.py`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.cross_validation import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) ############################################################################## # Plot of a ROC curve for a specific class plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() ############################################################################## # Plot ROC curves for the multiclass problem # Compute macro-average ROC curve and ROC area fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0) tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0) roc_auc["macro"] = 
auc(fpr["macro"], tpr["macro"]) plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), linewidth=2) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), linewidth=2) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show()
bsd-3-clause
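A minimal sketch of the micro- vs macro-averaging idea discussed in the plot_roc.py docstring above, computed directly with sklearn.metrics.roc_auc_score instead of tracing full ROC curves. The train/test split and linear SVC are illustrative choices, and train_test_split is imported from sklearn.model_selection (the snapshot above still uses the older sklearn.cross_validation location).

from sklearn import datasets, svm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize

# Binarize the three iris classes into a label indicator matrix.
iris = datasets.load_iris()
y = label_binarize(iris.target, classes=[0, 1, 2])
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, y, test_size=0.5, random_state=0)

# One-vs-rest linear SVC, scored with its decision function.
clf = OneVsRestClassifier(svm.SVC(kernel='linear', random_state=0))
y_score = clf.fit(X_train, y_train).decision_function(X_test)

# "micro" pools every element of the indicator matrix into one binary problem;
# "macro" averages one AUC per label with equal weight.
print("micro-average AUC: %.3f" % roc_auc_score(y_test, y_score, average="micro"))
print("macro-average AUC: %.3f" % roc_auc_score(y_test, y_score, average="macro"))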
teknix/namebench
nb_third_party/dns/rrset.py
215
5866
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS RRsets (an RRset is a named rdataset)""" import dns.name import dns.rdataset import dns.rdataclass import dns.renderer class RRset(dns.rdataset.Rdataset): """A DNS RRset (named rdataset). RRset inherits from Rdataset, and RRsets can be treated as Rdatasets in most cases. There are, however, a few notable exceptions. RRsets have different to_wire() and to_text() method arguments, reflecting the fact that RRsets always have an owner name. """ __slots__ = ['name', 'deleting'] def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE, deleting=None): """Create a new RRset.""" super(RRset, self).__init__(rdclass, rdtype) self.name = name self.deleting = deleting def _clone(self): obj = super(RRset, self)._clone() obj.name = self.name obj.deleting = self.deleting return obj def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' if not self.deleting is None: dtext = ' delete=' + dns.rdataclass.to_text(self.deleting) else: dtext = '' return '<DNS ' + str(self.name) + ' ' + \ dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>' def __str__(self): return self.to_text() def __eq__(self, other): """Two RRsets are equal if they have the same name and the same rdataset @rtype: bool""" if not isinstance(other, RRset): return False if self.name != other.name: return False return super(RRset, self).__eq__(other) def match(self, name, rdclass, rdtype, covers, deleting=None): """Returns True if this rrset matches the specified class, type, covers, and deletion state.""" if not super(RRset, self).match(rdclass, rdtype, covers): return False if self.name != name or self.deleting != deleting: return False return True def to_text(self, origin=None, relativize=True, **kw): """Convert the RRset into DNS master file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names are emitted. Any additional keyword arguments are passed on to the rdata to_text() method. @param origin: The origin for relative names, or None. @type origin: dns.name.Name object @param relativize: True if names should names be relativized @type relativize: bool""" return super(RRset, self).to_text(self.name, origin, relativize, self.deleting, **kw) def to_wire(self, file, compress=None, origin=None, **kw): """Convert the RRset to wire format.""" return super(RRset, self).to_wire(self.name, file, compress, origin, self.deleting, **kw) def to_rdataset(self): """Convert an RRset into an Rdataset. 
@rtype: dns.rdataset.Rdataset object """ return dns.rdataset.from_rdata_list(self.ttl, list(self)) def from_text_list(name, ttl, rdclass, rdtype, text_rdatas): """Create an RRset with the specified name, TTL, class, and type, and with the specified list of rdatas in text format. @rtype: dns.rrset.RRset object """ if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if isinstance(rdclass, str): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) r = RRset(name, rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(name, ttl, rdclass, rdtype, *text_rdatas): """Create an RRset with the specified name, TTL, class, and type and with the specified rdatas in text format. @rtype: dns.rrset.RRset object """ return from_text_list(name, ttl, rdclass, rdtype, text_rdatas) def from_rdata_list(name, ttl, rdatas): """Create an RRset with the specified name and TTL, and with the specified list of rdata objects. @rtype: dns.rrset.RRset object """ if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = RRset(name, rd.rdclass, rd.rdtype) r.update_ttl(ttl) first_time = False r.add(rd) return r def from_rdata(name, ttl, *rdatas): """Create an RRset with the specified name and TTL, and with the specified rdata objects. @rtype: dns.rrset.RRset object """ return from_rdata_list(name, ttl, rdatas)
apache-2.0
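A small usage sketch for the RRset module above, assuming the dnspython package (which provides dns.rrset) is installed; the owner name, TTL, and addresses are made-up example values, not taken from the original file.

import dns.rrset

# from_text(name, ttl, rdclass, rdtype, *text_rdatas) builds a named rdataset.
rrs = dns.rrset.from_text('www.example.com.', 300, 'IN', 'A',
                          '192.0.2.1', '192.0.2.2')

print(rrs.name)        # owner name shared by every rdata in the set
print(rrs.ttl)         # TTL supplied above (300 seconds)
for rdata in rrs:      # an RRset iterates like the Rdataset it inherits from
    print(rdata.address)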
jmargeta/scikit-learn
examples/cluster/plot_lena_compress.py
4
2193
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Vector Quantization Example ========================================================= The classic image processing example, Lena, an 8-bit grayscale bit-depth, 512 x 512 sized image, is used here to illustrate how `k`-means is used for vector quantization. """ print(__doc__) # Code source: Gael Varoqueux # Modified for Documentation merge by Jaques Grobler # License: BSD import numpy as np import scipy as sp import pylab as pl from sklearn import cluster n_clusters = 5 np.random.seed(0) try: lena = sp.lena() except AttributeError: # Newer versions of scipy have lena in misc from scipy import misc lena = misc.lena() X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4) k_means.fit(X) values = k_means.cluster_centers_.squeeze() labels = k_means.labels_ # create an array from labels and values lena_compressed = np.choose(labels, values) lena_compressed.shape = lena.shape vmin = lena.min() vmax = lena.max() # original lena pl.figure(1, figsize=(3, 2.2)) pl.imshow(lena, cmap=pl.cm.gray, vmin=vmin, vmax=256) # compressed lena pl.figure(2, figsize=(3, 2.2)) pl.imshow(lena_compressed, cmap=pl.cm.gray, vmin=vmin, vmax=vmax) # equal bins lena regular_values = np.linspace(0, 256, n_clusters + 1) regular_labels = np.searchsorted(regular_values, lena) - 1 regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean regular_lena = np.choose(regular_labels.ravel(), regular_values) regular_lena.shape = lena.shape pl.figure(3, figsize=(3, 2.2)) pl.imshow(regular_lena, cmap=pl.cm.gray, vmin=vmin, vmax=vmax) # histogram pl.figure(4, figsize=(3, 2.2)) pl.clf() pl.axes([.01, .01, .98, .98]) pl.hist(X, bins=256, color='.5', edgecolor='.5') pl.yticks(()) pl.xticks(regular_values) values = np.sort(values) for center_1, center_2 in zip(values[:-1], values[1:]): pl.axvline(.5 * (center_1 + center_2), color='b') for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]): pl.axvline(.5 * (center_1 + center_2), color='b', linestyle='--') pl.show()
bsd-3-clause
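A self-contained sketch of the vector-quantization step from plot_lena_compress.py above, run on a synthetic grayscale array because SciPy's bundled lena image has been removed from newer releases; the 64x64 random image and n_clusters=5 are illustrative stand-ins.

import numpy as np
from sklearn import cluster

rng = np.random.RandomState(0)
image = rng.randint(0, 256, size=(64, 64)).astype(float)  # stand-in for lena

# Quantize the gray levels down to n_clusters values with k-means.
X = image.reshape((-1, 1))  # (n_samples, n_features) array of pixel intensities
k_means = cluster.KMeans(n_clusters=5, n_init=4, random_state=0)
k_means.fit(X)

values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
compressed = np.choose(labels, values).reshape(image.shape)
print("gray levels after quantization:", np.unique(compressed).size)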
woobe/h2o
R/tests/autoGen/makeTestTasks.py
11
2537
## # Create the 'tasks' file # Each line of tasks contains parameters for building the RUnit ## import json from GenUtils import * def genTasks(): jsondata = open('./smalldata.json') data = json.load(jsondata) jsondata.close() tasks = open('./tasks', 'wb') for FU in ['[','<','<=','>','>=','==','!=',makeCompound(1),makeCompound(2)]: #,makeCompound(3),makeCompound(4)]: achoice = None for i in sample(range(len(data['datasets'])), 5): #[choice(range(len(data['datasets']))) for i in range(60)]: #for i in range(len(data['datasets'])): cnt = achoice datajson = data['datasets'][i] DATANAME = datajson.keys()[0] if FU != '[' and 'number' not in datajson[DATANAME]['ATTRS']['TYPES']: continue PATH = '"' + datajson[DATANAME]['PATHS'][0] + '"' TESTNAME = genTestName(FU, DATANAME) DESCRIPTION = '"' + genTestDescription(FU, DATANAME) + '"' COLS = "" if FU != '[': COLS = datajson[DATANAME]['ATTRS']['NAMES'] if len(FU.split(';')) > 1: achoice = 1 if cnt is not None: achoice += cnt FUPARAMS = generateFUParams(FU, DATANAME, datajson, choiceAs = achoice) if FUPARAMS == 'abort': continue if FU == '[': cols,rows,colPipeRow,loopCols,loopRows,loopColPipeLoopRow = FUPARAMS FUPARAMS = ':'.join([TESTNAME, DESCRIPTION, cols,rows,colPipeRow,loopCols,loopRows,loopColPipeLoopRow]) task = ','.join([TESTNAME,FU,DATANAME,PATH,'filterTask','mungeTask',FUPARAMS]) tasks.write(task) tasks.write('\n') continue if len(FU.split(';')) > 1: valLPipeColL, valRPipeColR, valL2PipeColL2, valR2PipeColR2 = FUPARAMS FUPARAMS = ':'.join([TESTNAME,DESCRIPTION,';'.join(COLS),valLPipeColL, valRPipeColR, valL2PipeColL2, valR2PipeColR2]) task = ','.join([TESTNAME,FU,DATANAME,PATH,'filterTask','mungeTask',FUPARAMS]) tasks.write(task) tasks.write('\n') achoice += 1 continue valPipeCol, valPipeCol2 = FUPARAMS FUPARAMS = ':'.join([TESTNAME, DESCRIPTION, ';'.join(COLS),valPipeCol, valPipeCol2]) task = ','.join([TESTNAME,FU,DATANAME,PATH,'filterTask','mungeTask',FUPARAMS]) tasks.write(task) tasks.write('\n')
apache-2.0
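A stripped-down sketch of the task-file generation pattern in makeTestTasks.py above: read a JSON catalogue of datasets and write one comma-separated parameter line per dataset/operator pair. The inline catalogue and field names are invented for illustration; the real script reads smalldata.json and relies on helpers from GenUtils that are not shown here.

import json

catalogue = json.loads("""
{"datasets": [
  {"iris": {"PATHS": ["smalldata/iris/iris.csv"]}},
  {"cars": {"PATHS": ["smalldata/cars.csv"]}}
]}
""")

with open("tasks_example", "w") as tasks:
    for op in ["<", "<=", ">"]:
        for record in catalogue["datasets"]:
            name = list(record.keys())[0]
            path = '"%s"' % record[name]["PATHS"][0]
            # test name, operator, dataset, path, task type -- one task per line
            tasks.write(",".join(["%s_%s" % (name, op), op, name, path, "filterTask"]))
            tasks.write("\n")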
thientu/scikit-learn
sklearn/metrics/tests/test_pairwise.py
71
25104
import numpy as np from numpy import linalg from scipy.sparse import dok_matrix, csr_matrix, issparse from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.externals.six import iteritems from sklearn.metrics.pairwise import euclidean_distances from sklearn.metrics.pairwise import manhattan_distances from sklearn.metrics.pairwise import linear_kernel from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel from sklearn.metrics.pairwise import polynomial_kernel from sklearn.metrics.pairwise import rbf_kernel from sklearn.metrics.pairwise import sigmoid_kernel from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics.pairwise import cosine_distances from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_distances_argmin_min from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.metrics.pairwise import pairwise_kernels from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from sklearn.metrics.pairwise import PAIRED_DISTANCES from sklearn.metrics.pairwise import check_pairwise_arrays from sklearn.metrics.pairwise import check_paired_arrays from sklearn.metrics.pairwise import _parallel_pairwise from sklearn.metrics.pairwise import paired_distances from sklearn.metrics.pairwise import paired_euclidean_distances from sklearn.metrics.pairwise import paired_manhattan_distances from sklearn.preprocessing import normalize def test_pairwise_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric="euclidean") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric="euclidean") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean") assert_array_almost_equal(S, S2) # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial. S = pairwise_distances(X, metric="cityblock") S2 = pairwise_distances(X, metric=cityblock) assert_equal(S.shape[0], S.shape[1]) assert_equal(S.shape[0], X.shape[0]) assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. 
S = pairwise_distances(X, Y, metric="manhattan") S2 = pairwise_distances(X, Y, metric=cityblock) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Low-level function for manhattan can divide in blocks to avoid # using too much memory during the broadcasting S3 = manhattan_distances(X, Y, size_threshold=10) assert_array_almost_equal(S, S3) # Test cosine as a string metric versus cosine callable # "cosine" uses sklearn metric, cosine (function) is scipy.spatial S = pairwise_distances(X, Y, metric="cosine") S2 = pairwise_distances(X, Y, metric=cosine) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric="cosine") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {"p": 2.0} S = pairwise_distances(X, Y, metric="minkowski", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {"p": 2.0} S = pairwise_distances(X, metric="minkowski", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski") assert_raises(TypeError, pairwise_distances, X, Y_sparse, metric="minkowski") # Test that a value error is raised if the metric is unkown assert_raises(ValueError, pairwise_distances, X, Y, metric="blah") def test_pairwise_precomputed(): for func in [pairwise_distances, pairwise_kernels]: # Test correct shape assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), metric='precomputed') # with two args assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 4)), metric='precomputed') # even if shape[1] agrees (although thus second arg is spurious) assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 3)), metric='precomputed') # Test not copied (if appropriate dtype) S = np.zeros((5, 5)) S2 = func(S, metric="precomputed") assert_true(S is S2) # with two args S = np.zeros((5, 3)) S2 = func(S, np.zeros((3, 3)), metric="precomputed") assert_true(S is S2) # Test always returns float dtype S = func(np.array([[1]], dtype='int'), metric='precomputed') assert_equal('f', S.dtype.kind) # Test converts list to array-like S = func([[1]], metric='precomputed') assert_true(isinstance(S, np.ndarray)) def check_pairwise_parallel(func, metric, kwds): rng = np.random.RandomState(0) for make_data in (np.array, csr_matrix): X = make_data(rng.random_sample((5, 4))) Y = make_data(rng.random_sample((3, 4))) try: S = func(X, metric=metric, n_jobs=1, **kwds) except (TypeError, ValueError) as exc: # Not all metrics support sparse input # ValueError may be triggered by bad callable if make_data is csr_matrix: assert_raises(type(exc), func, X, metric=metric, n_jobs=2, 
**kwds) continue else: raise S2 = func(X, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) S = func(X, Y, metric=metric, n_jobs=1, **kwds) S2 = func(X, Y, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) def test_pairwise_parallel(): wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1} metrics = [(pairwise_distances, 'euclidean', {}), (pairwise_distances, wminkowski, wminkowski_kwds), (pairwise_distances, 'wminkowski', wminkowski_kwds), (pairwise_kernels, 'polynomial', {'degree': 1}), (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}), ] for func, metric, kwds in metrics: yield check_pairwise_parallel, func, metric, kwds def test_pairwise_callable_nonstrict_metric(): # paired_distances should allow callable metric where metric(x, x) != 0 # Knowing that the callable is a strict metric would allow the diagonal to # be left uncalculated and set to 0. assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5) def callable_rbf_kernel(x, y, **kwds): # Callable version of pairwise.rbf_kernel. K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds) return K def test_pairwise_kernels(): # Test the pairwise_kernels helper function. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS. test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"] for metric in test_metrics: function = PAIRWISE_KERNEL_FUNCTIONS[metric] # Test with Y=None K1 = pairwise_kernels(X, metric=metric) K2 = function(X) assert_array_almost_equal(K1, K2) # Test with Y=Y K1 = pairwise_kernels(X, Y=Y, metric=metric) K2 = function(X, Y=Y) assert_array_almost_equal(K1, K2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric) assert_array_almost_equal(K1, K2) # Test with sparse X and Y X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) if metric in ["chi2", "additive_chi2"]: # these don't support sparse matrices yet assert_raises(ValueError, pairwise_kernels, X_sparse, Y=Y_sparse, metric=metric) continue K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric) assert_array_almost_equal(K1, K2) # Test with a callable function, with given keywords. metric = callable_rbf_kernel kwds = {} kwds['gamma'] = 0.1 K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds) K2 = rbf_kernel(X, Y=Y, **kwds) assert_array_almost_equal(K1, K2) # callable function, X=Y K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds) K2 = rbf_kernel(X, Y=X, **kwds) assert_array_almost_equal(K1, K2) def test_pairwise_kernels_filter_param(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) K = rbf_kernel(X, Y, gamma=0.1) params = {"gamma": 0.1, "blabla": ":)"} K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params) assert_array_almost_equal(K, K2) assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params) def test_paired_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) # Euclidean distance, with Y != X. 
Y = rng.random_sample((5, 4)) for metric, func in iteritems(PAIRED_DISTANCES): S = paired_distances(X, Y, metric=metric) S2 = func(X, Y) assert_array_almost_equal(S, S2) S3 = func(csr_matrix(X), csr_matrix(Y)) assert_array_almost_equal(S, S3) if metric in PAIRWISE_DISTANCE_FUNCTIONS: # Check the the pairwise_distances implementation # gives the same value distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y) distances = np.diag(distances) assert_array_almost_equal(distances, S) # Check the callable implementation S = paired_distances(X, Y, metric='manhattan') S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0)) assert_array_almost_equal(S, S2) # Test that a value error is raised when the lengths of X and Y should not # differ Y = rng.random_sample((3, 4)) assert_raises(ValueError, paired_distances, X, Y) def test_pairwise_distances_argmin_min(): # Check pairwise minimum distances computation for any metric X = [[0], [1]] Y = [[-1], [2]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) # euclidean metric D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean") D2 = pairwise_distances_argmin(X, Y, metric="euclidean") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # sparse matrix case Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean") assert_array_equal(Dsp, D) assert_array_equal(Esp, E) # We don't want np.matrix here assert_equal(type(Dsp), np.ndarray) assert_equal(type(Esp), np.ndarray) # Non-euclidean sklearn metric D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan") D2 = pairwise_distances_argmin(X, Y, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(E, [1., 1.]) D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan") D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (callable) D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski, metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (string) D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski", metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Compare with naive implementation rng = np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric="manhattan") dist_orig_ind = dist.argmin(axis=0) dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric="manhattan", batch_size=50) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) def test_euclidean_distances(): # Check the pairwise Euclidean distances computation X = [[0]] Y = [[1], [2]] D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) X = csr_matrix(X) Y = csr_matrix(Y) D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) rng = np.random.RandomState(0) X = rng.random_sample((10, 4)) Y = rng.random_sample((20, 4)) X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1) Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1) # check that we still get the right answers with {X,Y}_norm_squared D1 = euclidean_distances(X, Y) 
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq) D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq) D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq, Y_norm_squared=Y_norm_sq) assert_array_almost_equal(D2, D1) assert_array_almost_equal(D3, D1) assert_array_almost_equal(D4, D1) # check we get the wrong answer with wrong {X,Y}_norm_squared X_norm_sq *= 0.5 Y_norm_sq *= 0.5 wrong_D = euclidean_distances(X, Y, X_norm_squared=np.zeros_like(X_norm_sq), Y_norm_squared=np.zeros_like(Y_norm_sq)) assert_greater(np.max(np.abs(wrong_D - D1)), .01) # Paired distances def test_paired_euclidean_distances(): # Check the paired Euclidean distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_euclidean_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_paired_manhattan_distances(): # Check the paired manhattan distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_manhattan_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_chi_square_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((10, 4)) K_add = additive_chi2_kernel(X, Y) gamma = 0.1 K = chi2_kernel(X, Y, gamma=gamma) assert_equal(K.dtype, np.float) for i, x in enumerate(X): for j, y in enumerate(Y): chi2 = -np.sum((x - y) ** 2 / (x + y)) chi2_exp = np.exp(gamma * chi2) assert_almost_equal(K_add[i, j], chi2) assert_almost_equal(K[i, j], chi2_exp) # check diagonal is ones for data with itself K = chi2_kernel(Y) assert_array_equal(np.diag(K), 1) # check off-diagonal is < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) # check that float32 is preserved X = rng.random_sample((5, 4)).astype(np.float32) Y = rng.random_sample((10, 4)).astype(np.float32) K = chi2_kernel(X, Y) assert_equal(K.dtype, np.float32) # check integer type gets converted, # check that zeros are handled X = rng.random_sample((10, 4)).astype(np.int32) K = chi2_kernel(X, X) assert_true(np.isfinite(K).all()) assert_equal(K.dtype, np.float) # check that kernel of similar things is greater than dissimilar ones X = [[.3, .7], [1., 0]] Y = [[0, 1], [.9, .1]] K = chi2_kernel(X, Y) assert_greater(K[0, 0], K[0, 1]) assert_greater(K[1, 1], K[1, 0]) # test negative input assert_raises(ValueError, chi2_kernel, [[0, -1]]) assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]]) assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]]) # different n_features in X and Y assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]]) # sparse matrices assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y)) assert_raises(ValueError, additive_chi2_kernel, csr_matrix(X), csr_matrix(Y)) def test_kernel_symmetry(): # Valid kernels should be symmetric rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) assert_array_almost_equal(K, K.T, 15) def test_kernel_sparse(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) X_sparse = csr_matrix(X) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) K2 = kernel(X_sparse, X_sparse) assert_array_almost_equal(K, K2) def test_linear_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = linear_kernel(X, X) # the diagonal elements of a linear kernel are their squared norm assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X]) def test_rbf_kernel(): rng = np.random.RandomState(0) X 
= rng.random_sample((5, 4)) K = rbf_kernel(X, X) # the diagonal elements of a rbf kernel are 1 assert_array_almost_equal(K.flat[::6], np.ones(5)) def test_cosine_similarity_sparse_output(): # Test if cosine_similarity correctly produces sparse output. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False) assert_true(issparse(K1)) K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine") assert_array_almost_equal(K1.todense(), K2) def test_cosine_similarity(): # Test the cosine_similarity. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) for X_, Y_ in ((X, None), (X, Y), (Xcsr, None), (Xcsr, Ycsr)): # Test that the cosine is kernel is equal to a linear kernel when data # has been previously normalized by L2-norm. K1 = pairwise_kernels(X_, Y=Y_, metric="cosine") X_ = normalize(X_) if Y_ is not None: Y_ = normalize(Y_) K2 = pairwise_kernels(X_, Y=Y_, metric="linear") assert_array_almost_equal(K1, K2) def test_check_dense_matrices(): # Ensure that pairwise array check works for dense matrices. # Check that if XB is None, XB is returned as reference to XA XA = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_true(XA_checked is XB_checked) assert_array_equal(XA, XA_checked) def test_check_XB_returned(): # Ensure that if XA and XB are given correctly, they return as equal. # Check that if XB is not None, it is returned equal. # Note that the second dimension of XB is the same as XA. XA = np.resize(np.arange(40), (5, 8)) XB = np.resize(np.arange(32), (4, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) XB = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_paired_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) def test_check_different_dimensions(): # Ensure an error is raised if the dimensions are different. XA = np.resize(np.arange(45), (5, 9)) XB = np.resize(np.arange(32), (4, 8)) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XB = np.resize(np.arange(4 * 9), (4, 9)) assert_raises(ValueError, check_paired_arrays, XA, XB) def test_check_invalid_dimensions(): # Ensure an error is raised on 1D input arrays. # The modified tests are not 1D. In the old test, the array was internally # converted to 2D anyways XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) def test_check_sparse_arrays(): # Ensures that checks return valid sparse matrices. rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_sparse = csr_matrix(XA) XB = rng.random_sample((5, 4)) XB_sparse = csr_matrix(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) # compare their difference because testing csr matrices for # equality with '==' does not work as expected. 
assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XB_checked)) assert_equal(abs(XB_sparse - XB_checked).sum(), 0) XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XA_2_checked)) assert_equal(abs(XA_2_checked - XA_checked).sum(), 0) def tuplify(X): # Turns a numpy matrix (any n-dimensional array) into tuples. s = X.shape if len(s) > 1: # Tuplify each sub-array in the input. return tuple(tuplify(row) for row in X) else: # Single dimension input, just return tuple of contents. return tuple(r for r in X) def test_check_tuple_input(): # Ensures that checks return valid tuples. rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_tuples = tuplify(XA) XB = rng.random_sample((5, 4)) XB_tuples = tuplify(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples) assert_array_equal(XA_tuples, XA_checked) assert_array_equal(XB_tuples, XB_checked) def test_check_preserve_type(): # Ensures that type float32 is preserved. XA = np.resize(np.arange(40), (5, 8)).astype(np.float32) XB = np.resize(np.arange(40), (5, 8)).astype(np.float32) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_equal(XA_checked.dtype, np.float32) # both float32 XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_equal(XA_checked.dtype, np.float32) assert_equal(XB_checked.dtype, np.float32) # mismatched A XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float), XB) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float) # mismatched B XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(np.float)) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float)
bsd-3-clause
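A short usage sketch of the helpers exercised by test_pairwise.py above; the random 5x4 and 3x4 arrays are arbitrary illustration data.

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances, pairwise_kernels

rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))

D = pairwise_distances(X, Y, metric="euclidean")     # (5, 3) distance matrix
K = pairwise_kernels(X, Y, metric="rbf", gamma=0.1)  # (5, 3) kernel matrix
print(D.shape, K.shape)

# A callable metric(x, y) is applied to every row pair, just like the string metrics.
D_callable = pairwise_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum())
print(np.allclose(D_callable, pairwise_distances(X, Y, metric="manhattan")))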
jmargeta/scikit-learn
sklearn/cluster/tests/test_dbscan.py
6
2901
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from scipy.spatial import distance from sklearn.utils.testing import assert_equal from sklearn.cluster.dbscan_ import DBSCAN, dbscan from .common import generate_clustered_data n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): """Tests the DBSCAN algorithm with a similarity array.""" # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): """Tests the DBSCAN algorithm with a feature vector array.""" # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_callable(): """Tests the DBSCAN algorithm with a callable metric.""" # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__)
bsd-3-clause
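A compact sketch of the two code paths covered by test_dbscan.py above: DBSCAN on a raw feature array and on a precomputed distance matrix. make_blobs stands in for the private generate_clustered_data helper, and the eps/min_samples values are illustrative.

from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=60, centers=3, cluster_std=0.4, random_state=0)

# Feature-array path; label -1 marks noise points.
labels = DBSCAN(eps=0.8, min_samples=10).fit(X).labels_
print("clusters:", len(set(labels)) - (1 if -1 in labels else 0))

# Precomputed-distance path gives the same clustering for the same eps.
D = distance.squareform(distance.pdist(X))
labels = DBSCAN(metric="precomputed", eps=0.8, min_samples=10).fit(D).labels_
print("clusters (precomputed):", len(set(labels)) - (1 if -1 in labels else 0))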
axbaretto/beam
sdks/python/.tox/lint/lib/python2.7/site-packages/unit_tests/test_client.py
4
22674
# Copyright 2015 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) class TestClient(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigquery.client import Client return Client def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor(self): from google.cloud.bigquery._http import Connection PROJECT = 'PROJECT' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) self.assertIsInstance(client._connection, Connection) self.assertIs(client._connection.credentials, creds) self.assertIs(client._connection.http, http) def test_list_projects_defaults(self): import six from google.cloud.bigquery.client import Project PROJECT_1 = 'PROJECT_ONE' PROJECT_2 = 'PROJECT_TWO' PATH = 'projects' TOKEN = 'TOKEN' DATA = { 'nextPageToken': TOKEN, 'projects': [ {'kind': 'bigquery#project', 'id': PROJECT_1, 'numericId': 1, 'projectReference': {'projectId': PROJECT_1}, 'friendlyName': 'One'}, {'kind': 'bigquery#project', 'id': PROJECT_2, 'numericId': 2, 'projectReference': {'projectId': PROJECT_2}, 'friendlyName': 'Two'}, ] } creds = _make_credentials() client = self._make_one(PROJECT_1, creds) conn = client._connection = _Connection(DATA) iterator = client.list_projects() page = six.next(iterator.pages) projects = list(page) token = iterator.next_page_token self.assertEqual(len(projects), len(DATA['projects'])) for found, expected in zip(projects, DATA['projects']): self.assertIsInstance(found, Project) self.assertEqual(found.project_id, expected['id']) self.assertEqual(found.numeric_id, expected['numericId']) self.assertEqual(found.friendly_name, expected['friendlyName']) self.assertEqual(token, TOKEN) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) def test_list_projects_explicit_response_missing_projects_key(self): import six PROJECT = 'PROJECT' PATH = 'projects' TOKEN = 'TOKEN' DATA = {} creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_projects(max_results=3, page_token=TOKEN) page = six.next(iterator.pages) projects = list(page) token = iterator.next_page_token self.assertEqual(len(projects), 0) self.assertIsNone(token) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) self.assertEqual(req['query_params'], {'maxResults': 3, 'pageToken': TOKEN}) def test_list_datasets_defaults(self): import six from google.cloud.bigquery.dataset import Dataset PROJECT = 'PROJECT' DATASET_1 = 'dataset_one' DATASET_2 = 'dataset_two' PATH = 'projects/%s/datasets' % PROJECT TOKEN = 'TOKEN' DATA = { 'nextPageToken': TOKEN, 'datasets': [ {'kind': 'bigquery#dataset', 'id': '%s:%s' % 
(PROJECT, DATASET_1), 'datasetReference': {'datasetId': DATASET_1, 'projectId': PROJECT}, 'friendlyName': None}, {'kind': 'bigquery#dataset', 'id': '%s:%s' % (PROJECT, DATASET_2), 'datasetReference': {'datasetId': DATASET_2, 'projectId': PROJECT}, 'friendlyName': 'Two'}, ] } creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_datasets() page = six.next(iterator.pages) datasets = list(page) token = iterator.next_page_token self.assertEqual(len(datasets), len(DATA['datasets'])) for found, expected in zip(datasets, DATA['datasets']): self.assertIsInstance(found, Dataset) self.assertEqual(found.dataset_id, expected['id']) self.assertEqual(found.friendly_name, expected['friendlyName']) self.assertEqual(token, TOKEN) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) def test_list_datasets_explicit_response_missing_datasets_key(self): import six PROJECT = 'PROJECT' PATH = 'projects/%s/datasets' % PROJECT TOKEN = 'TOKEN' DATA = {} creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_datasets( include_all=True, max_results=3, page_token=TOKEN) page = six.next(iterator.pages) datasets = list(page) token = iterator.next_page_token self.assertEqual(len(datasets), 0) self.assertIsNone(token) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) self.assertEqual(req['query_params'], {'all': True, 'maxResults': 3, 'pageToken': TOKEN}) def test_dataset(self): from google.cloud.bigquery.dataset import Dataset PROJECT = 'PROJECT' DATASET = 'dataset_name' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) dataset = client.dataset(DATASET) self.assertIsInstance(dataset, Dataset) self.assertEqual(dataset.name, DATASET) self.assertIs(dataset._client, client) def test_job_from_resource_unknown_type(self): PROJECT = 'PROJECT' creds = _make_credentials() client = self._make_one(PROJECT, creds) with self.assertRaises(ValueError): client.job_from_resource({'configuration': {'nonesuch': {}}}) def test_list_jobs_defaults(self): import six from google.cloud.bigquery.job import LoadTableFromStorageJob from google.cloud.bigquery.job import CopyJob from google.cloud.bigquery.job import ExtractTableToStorageJob from google.cloud.bigquery.job import QueryJob PROJECT = 'PROJECT' DATASET = 'test_dataset' SOURCE_TABLE = 'source_table' DESTINATION_TABLE = 'destination_table' QUERY_DESTINATION_TABLE = 'query_destination_table' SOURCE_URI = 'gs://test_bucket/src_object*' DESTINATION_URI = 'gs://test_bucket/dst_object*' JOB_TYPES = { 'load_job': LoadTableFromStorageJob, 'copy_job': CopyJob, 'extract_job': ExtractTableToStorageJob, 'query_job': QueryJob, } PATH = 'projects/%s/jobs' % PROJECT TOKEN = 'TOKEN' QUERY = 'SELECT * from test_dataset:test_table' ASYNC_QUERY_DATA = { 'id': '%s:%s' % (PROJECT, 'query_job'), 'jobReference': { 'projectId': PROJECT, 'jobId': 'query_job', }, 'state': 'DONE', 'configuration': { 'query': { 'query': QUERY, 'destinationTable': { 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': QUERY_DESTINATION_TABLE, }, 'createDisposition': 'CREATE_IF_NEEDED', 'writeDisposition': 'WRITE_TRUNCATE', } }, } EXTRACT_DATA = { 'id': '%s:%s' % (PROJECT, 'extract_job'), 'jobReference': { 
'projectId': PROJECT, 'jobId': 'extract_job', }, 'state': 'DONE', 'configuration': { 'extract': { 'sourceTable': { 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': SOURCE_TABLE, }, 'destinationUris': [DESTINATION_URI], } }, } COPY_DATA = { 'id': '%s:%s' % (PROJECT, 'copy_job'), 'jobReference': { 'projectId': PROJECT, 'jobId': 'copy_job', }, 'state': 'DONE', 'configuration': { 'copy': { 'sourceTables': [{ 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': SOURCE_TABLE, }], 'destinationTable': { 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': DESTINATION_TABLE, }, } }, } LOAD_DATA = { 'id': '%s:%s' % (PROJECT, 'load_job'), 'jobReference': { 'projectId': PROJECT, 'jobId': 'load_job', }, 'state': 'DONE', 'configuration': { 'load': { 'destinationTable': { 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': SOURCE_TABLE, }, 'sourceUris': [SOURCE_URI], } }, } DATA = { 'nextPageToken': TOKEN, 'jobs': [ ASYNC_QUERY_DATA, EXTRACT_DATA, COPY_DATA, LOAD_DATA, ] } creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_jobs() page = six.next(iterator.pages) jobs = list(page) token = iterator.next_page_token self.assertEqual(len(jobs), len(DATA['jobs'])) for found, expected in zip(jobs, DATA['jobs']): name = expected['jobReference']['jobId'] self.assertIsInstance(found, JOB_TYPES[name]) self.assertEqual(found.name, name) self.assertEqual(token, TOKEN) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) self.assertEqual(req['query_params'], {'projection': 'full'}) def test_list_jobs_load_job_wo_sourceUris(self): import six from google.cloud.bigquery.job import LoadTableFromStorageJob PROJECT = 'PROJECT' DATASET = 'test_dataset' SOURCE_TABLE = 'source_table' JOB_TYPES = { 'load_job': LoadTableFromStorageJob, } PATH = 'projects/%s/jobs' % PROJECT TOKEN = 'TOKEN' LOAD_DATA = { 'id': '%s:%s' % (PROJECT, 'load_job'), 'jobReference': { 'projectId': PROJECT, 'jobId': 'load_job', }, 'state': 'DONE', 'configuration': { 'load': { 'destinationTable': { 'projectId': PROJECT, 'datasetId': DATASET, 'tableId': SOURCE_TABLE, }, } }, } DATA = { 'nextPageToken': TOKEN, 'jobs': [ LOAD_DATA, ] } creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_jobs() page = six.next(iterator.pages) jobs = list(page) token = iterator.next_page_token self.assertEqual(len(jobs), len(DATA['jobs'])) for found, expected in zip(jobs, DATA['jobs']): name = expected['jobReference']['jobId'] self.assertIsInstance(found, JOB_TYPES[name]) self.assertEqual(found.name, name) self.assertEqual(token, TOKEN) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) self.assertEqual(req['query_params'], {'projection': 'full'}) def test_list_jobs_explicit_missing(self): import six PROJECT = 'PROJECT' PATH = 'projects/%s/jobs' % PROJECT DATA = {} TOKEN = 'TOKEN' creds = _make_credentials() client = self._make_one(PROJECT, creds) conn = client._connection = _Connection(DATA) iterator = client.list_jobs(max_results=1000, page_token=TOKEN, all_users=True, state_filter='done') page = six.next(iterator.pages) jobs = list(page) token = iterator.next_page_token self.assertEqual(len(jobs), 0) self.assertIsNone(token) self.assertEqual(len(conn._requested), 1) req = conn._requested[0] 
self.assertEqual(req['method'], 'GET') self.assertEqual(req['path'], '/%s' % PATH) self.assertEqual(req['query_params'], {'projection': 'full', 'maxResults': 1000, 'pageToken': TOKEN, 'allUsers': True, 'stateFilter': 'done'}) def test_load_table_from_storage(self): from google.cloud.bigquery.job import LoadTableFromStorageJob PROJECT = 'PROJECT' JOB = 'job_name' DATASET = 'dataset_name' DESTINATION = 'destination_table' SOURCE_URI = 'http://example.com/source.csv' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) dataset = client.dataset(DATASET) destination = dataset.table(DESTINATION) job = client.load_table_from_storage(JOB, destination, SOURCE_URI) self.assertIsInstance(job, LoadTableFromStorageJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(list(job.source_uris), [SOURCE_URI]) self.assertIs(job.destination, destination) def test_copy_table(self): from google.cloud.bigquery.job import CopyJob PROJECT = 'PROJECT' JOB = 'job_name' DATASET = 'dataset_name' SOURCE = 'source_table' DESTINATION = 'destination_table' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) dataset = client.dataset(DATASET) source = dataset.table(SOURCE) destination = dataset.table(DESTINATION) job = client.copy_table(JOB, destination, source) self.assertIsInstance(job, CopyJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(list(job.sources), [source]) self.assertIs(job.destination, destination) def test_extract_table_to_storage(self): from google.cloud.bigquery.job import ExtractTableToStorageJob PROJECT = 'PROJECT' JOB = 'job_name' DATASET = 'dataset_name' SOURCE = 'source_table' DESTINATION = 'gs://bucket_name/object_name' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) dataset = client.dataset(DATASET) source = dataset.table(SOURCE) job = client.extract_table_to_storage(JOB, source, DESTINATION) self.assertIsInstance(job, ExtractTableToStorageJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(job.source, source) self.assertEqual(list(job.destination_uris), [DESTINATION]) def test_run_async_query_defaults(self): from google.cloud.bigquery.job import QueryJob PROJECT = 'PROJECT' JOB = 'job_name' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) job = client.run_async_query(JOB, QUERY) self.assertIsInstance(job, QueryJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(job.query, QUERY) self.assertEqual(job.udf_resources, []) self.assertEqual(job.query_parameters, []) def test_run_async_w_udf_resources(self): from google.cloud.bigquery._helpers import UDFResource from google.cloud.bigquery.job import QueryJob RESOURCE_URI = 'gs://some-bucket/js/lib.js' PROJECT = 'PROJECT' JOB = 'job_name' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) udf_resources = [UDFResource("resourceUri", RESOURCE_URI)] job = client.run_async_query(JOB, QUERY, udf_resources=udf_resources) self.assertIsInstance(job, QueryJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(job.query, QUERY) self.assertEqual(job.udf_resources, udf_resources) 
self.assertEqual(job.query_parameters, []) def test_run_async_w_query_parameters(self): from google.cloud.bigquery._helpers import ScalarQueryParameter from google.cloud.bigquery.job import QueryJob PROJECT = 'PROJECT' JOB = 'job_name' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) query_parameters = [ScalarQueryParameter('foo', 'INT64', 123)] job = client.run_async_query(JOB, QUERY, query_parameters=query_parameters) self.assertIsInstance(job, QueryJob) self.assertIs(job._client, client) self.assertEqual(job.name, JOB) self.assertEqual(job.query, QUERY) self.assertEqual(job.udf_resources, []) self.assertEqual(job.query_parameters, query_parameters) def test_run_sync_query_defaults(self): from google.cloud.bigquery.query import QueryResults PROJECT = 'PROJECT' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) query = client.run_sync_query(QUERY) self.assertIsInstance(query, QueryResults) self.assertIs(query._client, client) self.assertIsNone(query.name) self.assertEqual(query.query, QUERY) self.assertEqual(query.udf_resources, []) self.assertEqual(query.query_parameters, []) def test_run_sync_query_w_udf_resources(self): from google.cloud.bigquery._helpers import UDFResource from google.cloud.bigquery.query import QueryResults RESOURCE_URI = 'gs://some-bucket/js/lib.js' PROJECT = 'PROJECT' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) udf_resources = [UDFResource("resourceUri", RESOURCE_URI)] query = client.run_sync_query(QUERY, udf_resources=udf_resources) self.assertIsInstance(query, QueryResults) self.assertIs(query._client, client) self.assertIsNone(query.name) self.assertEqual(query.query, QUERY) self.assertEqual(query.udf_resources, udf_resources) self.assertEqual(query.query_parameters, []) def test_run_sync_query_w_query_parameters(self): from google.cloud.bigquery._helpers import ScalarQueryParameter from google.cloud.bigquery.query import QueryResults PROJECT = 'PROJECT' QUERY = 'select count(*) from persons' creds = _make_credentials() http = object() client = self._make_one(project=PROJECT, credentials=creds, http=http) query_parameters = [ScalarQueryParameter('foo', 'INT64', 123)] query = client.run_sync_query(QUERY, query_parameters=query_parameters) self.assertIsInstance(query, QueryResults) self.assertIs(query._client, client) self.assertIsNone(query.name) self.assertEqual(query.query, QUERY) self.assertEqual(query.udf_resources, []) self.assertEqual(query.query_parameters, query_parameters) class _Connection(object): def __init__(self, *responses): self._responses = responses self._requested = [] def api_request(self, **kw): self._requested.append(kw) response, self._responses = self._responses[0], self._responses[1:] return response
apache-2.0
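The tests above all follow one pattern: replace the client's connection with a stub that returns canned API responses and records what was requested. The sketch below reproduces that pattern with a made-up SimpleClient so it runs without google-cloud-bigquery installed; it is not the real Client class.

class _StubConnection(object):
    """Returns queued responses and records every request it receives."""
    def __init__(self, *responses):
        self._responses = list(responses)
        self._requested = []

    def api_request(self, **kw):
        self._requested.append(kw)
        return self._responses.pop(0)


class SimpleClient(object):
    """Made-up stand-in for the client under test."""
    def __init__(self, connection):
        self._connection = connection

    def list_projects(self):
        data = self._connection.api_request(method='GET', path='/projects')
        return [p['id'] for p in data.get('projects', [])]


conn = _StubConnection({'projects': [{'id': 'proj-1'}, {'id': 'proj-2'}]})
client = SimpleClient(conn)
assert client.list_projects() == ['proj-1', 'proj-2']
assert conn._requested[0] == {'method': 'GET', 'path': '/projects'}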
jmargeta/scikit-learn
sklearn/linear_model/tests/test_passive_aggressive.py
31
6147
import numpy as np
import scipy.sparse as sp

from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises

from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor

iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)


class MyPassiveAggressive(ClassifierMixin):

    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)

                sqnorm = np.dot(X[i], X[i])

                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))

                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)

                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += step

    def project(self, X):
        return np.dot(X, self.w) + self.b


def test_classifier_accuracy():
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
                                              fit_intercept=fit_intercept,
                                              random_state=0)
            clf.fit(data, y)
            score = clf.score(data, y)
            assert_greater(score, 0.79)


def test_classifier_partial_fit():
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0, fit_intercept=True,
                                          random_state=0)
        for t in range(30):
            clf.partial_fit(data, y, classes)
        score = clf.score(data, y)
        assert_greater(score, 0.79)


def test_classifier_refit():
    """Classifier can be retrained on different labels and features."""
    clf = PassiveAggressiveClassifier().fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))

    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)


def test_classifier_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("hinge", "squared_hinge"):
        clf1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True,
                                   n_iter=2)
        clf1.fit(X, y_bin)

        for data in (X, X_csr):
            clf2 = PassiveAggressiveClassifier(C=1.0, loss=loss,
                                               fit_intercept=True, n_iter=2)
            clf2.fit(data, y_bin)

            assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)


def test_classifier_undefined_methods():
    clf = PassiveAggressiveClassifier()
    for meth in ("predict_proba", "predict_log_proba", "transform"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)


def test_regressor_mse():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)


def test_regressor_partial_fit():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0, fit_intercept=True,
                                         random_state=0)
        for t in range(50):
            reg.partial_fit(data, y_bin)
        pred = reg.predict(data)
        assert_less(np.mean((pred - y_bin) ** 2), 1.7)


def test_regressor_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        reg1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True,
                                   n_iter=2)
        reg1.fit(X, y_bin)

        for data in (X, X_csr):
            reg2 = PassiveAggressiveRegressor(C=1.0, loss=loss,
                                              fit_intercept=True, n_iter=2)
            reg2.fit(data, y_bin)

            assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)


def test_regressor_undefined_methods():
    reg = PassiveAggressiveRegressor()
    for meth in ("transform",):
        assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
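
# Hedged usage sketch (an illustrative addition, not part of the original test
# suite): a minimal example of the estimator exercised above, assuming the
# module-level shuffled iris arrays `X` and `y` defined at the top of this file.
# It mirrors test_classifier_accuracy without the assertion helpers.
def _example_passive_aggressive_usage():
    clf = PassiveAggressiveClassifier(C=1.0, n_iter=30, fit_intercept=True,
                                      random_state=0)
    clf.fit(X, y)
    return clf.score(X, y)  # typically above 0.79 on this data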
bsd-3-clause
thientu/scikit-learn
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
217
3893
""" ============================================== Feature agglomeration vs. univariate selection ============================================== This example compares 2 dimensionality reduction strategies: - univariate feature selection with Anova - feature agglomeration with Ward hierarchical clustering Both methods are compared in a regression problem using a BayesianRidge as supervised estimator. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause print(__doc__) import shutil import tempfile import numpy as np import matplotlib.pyplot as plt from scipy import linalg, ndimage from sklearn.feature_extraction.image import grid_to_graph from sklearn import feature_selection from sklearn.cluster import FeatureAgglomeration from sklearn.linear_model import BayesianRidge from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.externals.joblib import Memory from sklearn.cross_validation import KFold ############################################################################### # Generate data n_samples = 200 size = 40 # image size roi_size = 15 snr = 5. np.random.seed(0) mask = np.ones([size, size], dtype=np.bool) coef = np.zeros((size, size)) coef[0:roi_size, 0:roi_size] = -1. coef[-roi_size:, -roi_size:] = 1. X = np.random.randn(n_samples, size ** 2) for x in X: # smooth data x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel() X -= X.mean(axis=0) X /= X.std(axis=0) y = np.dot(X, coef.ravel()) noise = np.random.randn(y.shape[0]) noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2) y += noise_coef * noise # add noise ############################################################################### # Compute the coefs of a Bayesian Ridge with GridSearch cv = KFold(len(y), 2) # cross-validation generator for model selection ridge = BayesianRidge() cachedir = tempfile.mkdtemp() mem = Memory(cachedir=cachedir, verbose=1) # Ward agglomeration followed by BayesianRidge connectivity = grid_to_graph(n_x=size, n_y=size) ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem) clf = Pipeline([('ward', ward), ('ridge', ridge)]) # Select the optimal number of parcels with grid search clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_agglomeration_ = coef_.reshape(size, size) # Anova univariate feature selection followed by BayesianRidge f_regression = mem.cache(feature_selection.f_regression) # caching function anova = feature_selection.SelectPercentile(f_regression) clf = Pipeline([('anova', anova), ('ridge', ridge)]) # Select the optimal percentage of features with grid search clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_selection_ = coef_.reshape(size, size) ############################################################################### # Inverse the transformation to plot the results on an image plt.close('all') plt.figure(figsize=(7.3, 2.7)) plt.subplot(1, 3, 1) plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("True weights") plt.subplot(1, 3, 2) plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Selection") plt.subplot(1, 3, 3) 
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Agglomeration") plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26) plt.show() # Attempt to remove the temporary cachedir, but don't worry if it fails shutil.rmtree(cachedir, ignore_errors=True)
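
# Hedged illustrative sketch (not in the original example): the key mechanism
# above is FeatureAgglomeration's reduce/expand round trip -- ``transform``
# averages the features belonging to each cluster, and ``inverse_transform``
# maps the reduced representation (or coefficients learned on it) back onto the
# original feature grid. A minimal standalone demonstration on the data
# generated above, using names (_ward_demo, etc.) introduced only here:
_ward_demo = FeatureAgglomeration(n_clusters=10, connectivity=connectivity)
X_reduced_demo = _ward_demo.fit_transform(X)                   # (n_samples, 10)
X_restored_demo = _ward_demo.inverse_transform(X_reduced_demo)  # (n_samples, size ** 2)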
bsd-3-clause
ningchi/scikit-learn
sklearn/metrics/cluster/bicluster.py
25
2741
from __future__ import division

import numpy as np

from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array

__all__ = ["consensus_score"]


def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape."""
    check_consistent_length(*a)
    check_consistent_length(*b)
    checks = lambda x: check_array(x, ensure_2d=False)
    a_rows, a_cols = map(checks, a)
    b_rows, b_cols = map(checks, b)
    return a_rows, a_cols, b_rows, b_cols


def _jaccard(a_rows, a_cols, b_rows, b_cols):
    """Jaccard coefficient on the elements of the two biclusters."""
    intersection = ((a_rows * b_rows).sum() *
                    (a_cols * b_cols).sum())

    a_size = a_rows.sum() * a_cols.sum()
    b_size = b_rows.sum() * b_cols.sum()

    return intersection / (a_size + b_size - intersection)


def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the Jaccard coefficient of a's bicluster i and b's
    bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    result = np.array(list(list(similarity(a_rows[i], a_cols[i],
                                           b_rows[j], b_cols[j])
                                for j in range(n_b))
                           for i in range(n_a)))
    return result


def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Similarity between individual biclusters is computed. Then the best
    matching between sets is found using the Hungarian algorithm. The
    final score is the sum of similarities divided by the size of the
    larger set.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------

    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    if similarity == "jaccard":
        similarity = _jaccard
    matrix = _pairwise_similarity(a, b, similarity)
    indices = linear_assignment(1. - matrix)
    n_a = len(a[0])
    n_b = len(b[0])
    return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
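
# Hedged usage sketch (an illustrative addition, not part of the original
# module): ``consensus_score`` expects each argument as a (rows, columns) pair
# of boolean indicator arrays with one row per bicluster; comparing a set of
# biclusters against itself yields a score of 1.0.
def _consensus_score_example():
    rows = np.array([[True, True, False, False],
                     [False, False, True, True]])
    cols = np.array([[True, False, True, False],
                     [False, True, False, True]])
    return consensus_score((rows, cols), (rows, cols))  # == 1.0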
bsd-3-clause
laszlocsomor/tensorflow
tensorflow/examples/tutorials/estimators/abalone.py
73
6176
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DNNRegressor with custom estimator for abalone dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import tempfile from six.moves import urllib import numpy as np import tensorflow as tf FLAGS = None tf.logging.set_verbosity(tf.logging.INFO) # Learning rate for the model LEARNING_RATE = 0.001 def maybe_download(train_data, test_data, predict_data): """Maybe downloads training data and returns train and test file names.""" if train_data: train_file_name = train_data else: train_file = tempfile.NamedTemporaryFile(delete=False) urllib.request.urlretrieve( "http://download.tensorflow.org/data/abalone_train.csv", train_file.name) train_file_name = train_file.name train_file.close() print("Training data is downloaded to %s" % train_file_name) if test_data: test_file_name = test_data else: test_file = tempfile.NamedTemporaryFile(delete=False) urllib.request.urlretrieve( "http://download.tensorflow.org/data/abalone_test.csv", test_file.name) test_file_name = test_file.name test_file.close() print("Test data is downloaded to %s" % test_file_name) if predict_data: predict_file_name = predict_data else: predict_file = tempfile.NamedTemporaryFile(delete=False) urllib.request.urlretrieve( "http://download.tensorflow.org/data/abalone_predict.csv", predict_file.name) predict_file_name = predict_file.name predict_file.close() print("Prediction data is downloaded to %s" % predict_file_name) return train_file_name, test_file_name, predict_file_name def model_fn(features, labels, mode, params): """Model function for Estimator.""" # Connect the first hidden layer to input layer # (features["x"]) with relu activation first_hidden_layer = tf.layers.dense(features["x"], 10, activation=tf.nn.relu) # Connect the second hidden layer to first hidden layer with relu second_hidden_layer = tf.layers.dense( first_hidden_layer, 10, activation=tf.nn.relu) # Connect the output layer to second hidden layer (no activation fn) output_layer = tf.layers.dense(second_hidden_layer, 1) # Reshape output layer to 1-dim Tensor to return predictions predictions = tf.reshape(output_layer, [-1]) # Provide an estimator spec for `ModeKeys.PREDICT`. if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec( mode=mode, predictions={"ages": predictions}) # Calculate loss using mean squared error loss = tf.losses.mean_squared_error(labels, predictions) optimizer = tf.train.GradientDescentOptimizer( learning_rate=params["learning_rate"]) train_op = optimizer.minimize( loss=loss, global_step=tf.train.get_global_step()) # Calculate root mean squared error as additional eval metric eval_metric_ops = { "rmse": tf.metrics.root_mean_squared_error( tf.cast(labels, tf.float64), predictions) } # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes. 
return tf.estimator.EstimatorSpec( mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops) def main(unused_argv): # Load datasets abalone_train, abalone_test, abalone_predict = maybe_download( FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data) # Training examples training_set = tf.contrib.learn.datasets.base.load_csv_without_header( filename=abalone_train, target_dtype=np.int, features_dtype=np.float64) # Test examples test_set = tf.contrib.learn.datasets.base.load_csv_without_header( filename=abalone_test, target_dtype=np.int, features_dtype=np.float64) # Set of 7 examples for which to predict abalone ages prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header( filename=abalone_predict, target_dtype=np.int, features_dtype=np.float64) # Set model params model_params = {"learning_rate": LEARNING_RATE} # Instantiate Estimator nn = tf.estimator.Estimator(model_fn=model_fn, params=model_params) train_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": np.array(training_set.data)}, y=np.array(training_set.target), num_epochs=None, shuffle=True) # Train nn.train(input_fn=train_input_fn, steps=5000) # Score accuracy test_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": np.array(test_set.data)}, y=np.array(test_set.target), num_epochs=1, shuffle=False) ev = nn.evaluate(input_fn=test_input_fn) print("Loss: %s" % ev["loss"]) print("Root Mean Squared Error: %s" % ev["rmse"]) # Print out predictions predict_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": prediction_set.data}, num_epochs=1, shuffle=False) predictions = nn.predict(input_fn=predict_input_fn) for i, p in enumerate(predictions): print("Prediction %s: %s" % (i + 1, p["ages"])) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--train_data", type=str, default="", help="Path to the training data.") parser.add_argument( "--test_data", type=str, default="", help="Path to the test data.") parser.add_argument( "--predict_data", type=str, default="", help="Path to the prediction data.") FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
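
# Hedged usage note (an illustrative addition, not part of the original
# tutorial): assuming a TensorFlow 1.x environment, the script is typically run
# directly, optionally pointing the flags defined above at local copies of the
# CSV files:
#
#     python abalone.py \
#         --train_data=/tmp/abalone_train.csv \
#         --test_data=/tmp/abalone_test.csv \
#         --predict_data=/tmp/abalone_predict.csv
#
# When the flags are omitted, maybe_download() fetches the datasets into
# temporary files before main() trains and evaluates the custom Estimator
# built from model_fn.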
apache-2.0
DistrictDataLabs/yellowbrick
tests/test_regressor/test_prediction_error.py
1
11623
# tests.test_regressor.test_prediction_error # Ensure that the regressor prediction error visualization works. # # Author: Rebecca Bilbro # Author: Benjamin Bengfort # Created: Sat Oct 8 16:30:39 2016 -0400 # # Copyright (C) 2016 The scikit-yb developers # For license information, see LICENSE.txt # # ID: test_prediction_error.py [] $ """ Ensure that the regressor prediction error visualization works. """ ########################################################################## ## Imports ########################################################################## import pytest import matplotlib.pyplot as plt import numpy as np from unittest import mock from tests.fixtures import Dataset, Split from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase from yellowbrick.datasets import load_energy, load_concrete from yellowbrick.regressor.prediction_error import PredictionError, prediction_error from sklearn.datasets import make_regression from sklearn.linear_model import Ridge, Lasso from sklearn.neural_network import MLPRegressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split as tts from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer try: import pandas as pd except ImportError: pd = None ########################################################################## ## Data ########################################################################## @pytest.fixture(scope="class") def data(request): """ Creates a fixture of train and test splits for the sklearn digits dataset For ease of use returns a Dataset named tuple composed of two Split tuples. """ X, y = make_regression( n_samples=500, n_features=22, n_informative=8, random_state=42, noise=0.2, bias=0.2, ) X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=11) # Set a class attribute for digits request.cls.data = Dataset(Split(X_train, X_test), Split(y_train, y_test)) ########################################################################## ## Prediction Error Test Cases ########################################################################## @pytest.mark.usefixtures("data") class TestPredictionError(VisualTestCase): """ Test the PredictionError visualizer """ @pytest.mark.filterwarnings("ignore:Stochastic Optimizer") @pytest.mark.filterwarnings("ignore:internal gelsd driver lwork query error") def test_prediction_error(self): """ Test image similarity of prediction error on random data """ _, ax = plt.subplots() model = MLPRegressor(random_state=229) visualizer = PredictionError(model, ax=ax) visualizer.fit(self.data.X.train, self.data.y.train) visualizer.score(self.data.X.test, self.data.y.test) visualizer.finalize() self.assert_images_similar(visualizer, tol=1, remove_legend=True) @pytest.mark.skipif(pd is None, reason="pandas is required") def test_prediction_error_pandas(self): """ Test Pandas real world dataset with image similarity on Ridge """ _, ax = plt.subplots() # Load the occupancy dataset from fixtures data = load_energy(return_dataset=True) X, y = data.to_pandas() # Create train/test splits splits = tts(X, y, test_size=0.2, random_state=8873) X_train, X_test, y_train, y_test = splits visualizer = PredictionError(Ridge(random_state=22), ax=ax) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.finalize() self.assert_images_similar(visualizer, tol=1, remove_legend=True) def test_prediction_error_numpy(self): """ Test NumPy real world dataset with image similarity on Ridge """ _, ax = 
plt.subplots() # Load the occupancy dataset from fixtures data = load_energy(return_dataset=True) X, y = data.to_numpy() # Create train/test splits splits = tts(X, y, test_size=0.2, random_state=8873) X_train, X_test, y_train, y_test = splits visualizer = PredictionError(Ridge(random_state=22), ax=ax) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.finalize() self.assert_images_similar(visualizer, tol=1, remove_legend=True) def test_score(self): """ Assert returns R2 score """ visualizer = PredictionError(LinearRegression()) visualizer.fit(self.data.X.train, self.data.y.train) score = visualizer.score(self.data.X.test, self.data.y.test) assert score == pytest.approx(0.9999983124154965) assert visualizer.score_ == score def test_peplot_shared_limits(self): """ Test shared limits on the peplot """ visualizer = PredictionError(LinearRegression(), shared_limits=False) visualizer.fit(self.data.X.train, self.data.y.train) visualizer.score(self.data.X.test, self.data.y.test) visualizer.finalize() xlim = tuple(map(int, visualizer.ax.get_xlim())) ylim = tuple(map(int, visualizer.ax.get_ylim())) assert xlim == ylim @pytest.mark.filterwarnings("ignore:internal gelsd driver lwork query error") def test_peplot_no_shared_limits(self): """ Test image similarity with no shared limits on the peplot """ visualizer = PredictionError(Ridge(random_state=43), shared_limits=False) visualizer.fit(self.data.X.train, self.data.y.train) visualizer.score(self.data.X.test, self.data.y.test) visualizer.finalize() xlim = tuple(map(int, visualizer.ax.get_xlim())) ylim = tuple(map(int, visualizer.ax.get_ylim())) assert not xlim == ylim self.assert_images_similar(visualizer, tol=1.0, remove_legend=True) def test_peplot_no_lines(self): """ Test image similarity with no lines drawn on the plot """ visualizer = PredictionError( Lasso(random_state=23, alpha=10), bestfit=False, identity=False ) visualizer.fit(self.data.X.train, self.data.y.train) visualizer.score(self.data.X.test, self.data.y.test) visualizer.finalize() self.assert_images_similar(visualizer, tol=1.0, remove_legend=True) def test_alpha_param(self): """ Test that the user can supply an alpha param on instantiation """ # Instantiate a sklearn regressor model = Lasso(random_state=23, alpha=10) # Instantiate a prediction error plot, provide custom alpha visualizer = PredictionError(model, bestfit=False, identity=False, alpha=0.7) # Test param gets set correctly assert visualizer.alpha == 0.7 # Mock ax and fit the visualizer visualizer.ax = mock.MagicMock(autospec=True) visualizer.fit(self.data.X.train, self.data.y.train) visualizer.score(self.data.X.test, self.data.y.test) # Test that alpha was passed to internal matplotlib scatterplot _, scatter_kwargs = visualizer.ax.scatter.call_args assert "alpha" in scatter_kwargs assert scatter_kwargs["alpha"] == 0.7 def test_is_fitted_param(self): """ Test that the user can supply an is_fitted param and it's state is maintained """ # Instantiate a sklearn regressor model = Lasso(random_state=23, alpha=10) # Instantiate a prediction error plot, provide custom alpha visualizer = PredictionError(model, bestfit=False, identity=False, is_fitted=False) # Test param gets set correctly assert visualizer.is_fitted == False @pytest.mark.xfail( reason="""third test fails with AssertionError: Expected fit to be called once. 
Called 0 times.""" ) def test_peplot_with_fitted(self): """ Test that PredictionError properly handles an already-fitted model """ X, y = load_energy(return_dataset=True).to_numpy() model = Ridge().fit(X, y) with mock.patch.object(model, "fit") as mockfit: oz = PredictionError(model) oz.fit(X, y) mockfit.assert_not_called() with mock.patch.object(model, "fit") as mockfit: oz = PredictionError(model, is_fitted=True) oz.fit(X, y) mockfit.assert_not_called() with mock.patch.object(model, "fit") as mockfit: oz = PredictionError(model, is_fitted=False) oz.fit(X, y) mockfit.assert_called_once_with(X, y) @pytest.mark.xfail( IS_WINDOWS_OR_CONDA, reason="font rendering different in OS and/or Python; see #892", ) def test_prediction_error_quick_method(self): """ Image similarity test using the residuals plot quick method """ _, ax = plt.subplots() model = Lasso(random_state=19) oz = prediction_error( model, self.data.X.train, self.data.y.train, ax=ax, show=False ) assert isinstance(oz, PredictionError) self.assert_images_similar(oz) def test_within_pipeline(self): """ Test that visualizer can be accessed within a sklearn pipeline """ X, y = load_concrete() X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42) model = Pipeline([ ('imputer', SimpleImputer(missing_values=np.nan, strategy='mean')), ('pe', PredictionError(Lasso())) ]) model.fit(X_train, y_train) model.score(X_test, y_test) model['pe'].finalize() self.assert_images_similar(model['pe'], tol=2.0) def test_within_pipeline_quickmethod(self): """ Test that visualizer can be accessed within a sklearn pipeline """ X, y = load_concrete() X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42) model = Pipeline([ ('imputer', SimpleImputer(missing_values=np.nan, strategy='mean')), ('pe', PredictionError(Lasso())) ]) model.fit(X_train, y_train) model.score(X_test, y_test) model['pe'].finalize() self.assert_images_similar(model['pe'], tol=2.0) def test_pipeline_as_model_input(self): """ Test that visualizer can handle sklearn pipeline as model input """ X, y = load_concrete() X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42) model = Pipeline([ ('imputer', SimpleImputer(missing_values=np.nan, strategy='mean')), ('lasso', Lasso()) ]) oz = PredictionError(model) oz.fit(X_train, y_train) oz.score(X_test, y_test) oz.finalize() self.assert_images_similar(oz, tol=2.0) def test_pipeline_as_model_input_quickmethod(self): """ Test that visualizer can handle sklearn pipeline as model input """ X, y = load_concrete() X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42) model = Pipeline([ ('imputer', SimpleImputer(missing_values=np.nan, strategy='mean')), ('lasso', Lasso()) ]) oz = prediction_error(model, X_train, y_train, X_test, y_test) oz.finalize() self.assert_images_similar(oz, tol=2.0)
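
# Hedged usage sketch (an illustrative addition, not part of the original test
# suite): outside of the image-similarity harness, PredictionError is normally
# used directly -- fit on training data, score on held-out data, then render.
# This relies only on names already imported at the top of this module.
def _prediction_error_usage_example():
    X, y = load_concrete()
    X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42)
    viz = PredictionError(Lasso())
    viz.fit(X_train, y_train)
    viz.score(X_test, y_test)  # draws the scatter plus identity/best-fit lines by default
    return viz.show()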
apache-2.0
PeterPetrik/QGIS
python/plugins/processing/algs/gdal/translate.py
35
7382
# -*- coding: utf-8 -*- """ *************************************************************************** translate.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' import os from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsRasterFileWriter, QgsProcessingException, QgsProcessingParameterDefinition, QgsProcessingParameterRasterLayer, QgsProcessingParameterNumber, QgsProcessingParameterBoolean, QgsProcessingParameterString, QgsProcessingParameterEnum, QgsProcessingParameterCrs, QgsProcessingParameterRasterDestination) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class translate(GdalAlgorithm): INPUT = 'INPUT' TARGET_CRS = 'TARGET_CRS' NODATA = 'NODATA' COPY_SUBDATASETS = 'COPY_SUBDATASETS' OPTIONS = 'OPTIONS' EXTRA = 'EXTRA' DATA_TYPE = 'DATA_TYPE' OUTPUT = 'OUTPUT' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.TYPES = [self.tr('Use Input Layer Data Type'), 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64'] self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterCrs(self.TARGET_CRS, self.tr('Override the projection for the output file'), defaultValue=None, optional=True)) self.addParameter(QgsProcessingParameterNumber(self.NODATA, self.tr('Assign a specified nodata value to output bands'), type=QgsProcessingParameterNumber.Double, defaultValue=None, optional=True)) self.addParameter(QgsProcessingParameterBoolean(self.COPY_SUBDATASETS, self.tr('Copy all subdatasets of this file to individual output files'), defaultValue=False)) options_param = QgsProcessingParameterString(self.OPTIONS, self.tr('Additional creation options'), defaultValue='', optional=True) options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) options_param.setMetadata({ 'widget_wrapper': { 'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}}) self.addParameter(options_param) extra_param = QgsProcessingParameterString(self.EXTRA, self.tr('Additional command-line parameters'), defaultValue=None, optional=True) extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(extra_param) dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE, self.tr('Output data type'), self.TYPES, allowMultiple=False, defaultValue=0) dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(dataType_param) self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Converted'))) def name(self): return 'translate' def displayName(self): return self.tr('Translate (convert format)') def group(self): return self.tr('Raster conversion') def groupId(self): return 
'rasterconversion' def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'translate.png')) def commandName(self): return 'gdal_translate' def getConsoleCommands(self, parameters, context, feedback, executing=True): inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context) if inLayer is None: raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT)) out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context) self.setOutputValue(self.OUTPUT, out) if self.NODATA in parameters and parameters[self.NODATA] is not None: nodata = self.parameterAsDouble(parameters, self.NODATA, context) else: nodata = None arguments = [] crs = self.parameterAsCrs(parameters, self.TARGET_CRS, context) if crs.isValid(): arguments.append('-a_srs') arguments.append(GdalUtils.gdal_crs_string(crs)) if nodata is not None: arguments.append('-a_nodata') arguments.append(nodata) if self.parameterAsBoolean(parameters, self.COPY_SUBDATASETS, context): arguments.append('-sds') data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context) if data_type: arguments.append('-ot ' + self.TYPES[data_type]) arguments.append('-of') arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])) options = self.parameterAsString(parameters, self.OPTIONS, context) if options: arguments.extend(GdalUtils.parseCreationOptions(options)) if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''): extra = self.parameterAsString(parameters, self.EXTRA, context) arguments.append(extra) arguments.append(inLayer.source()) arguments.append(out) return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
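
# Hedged illustration (not part of the original algorithm): for a typical
# parameter set -- a target CRS, a nodata value, an explicit output data type
# and a GeoTIFF destination -- getConsoleCommands() above assembles a
# gdal_translate call along the lines of
#
#     gdal_translate -a_srs EPSG:4326 -a_nodata -9999.0 -ot Float32 \
#         -of GTiff /path/to/input.tif /path/to/output.tif
#
# with the exact quoting and joining handled by GdalUtils.escapeAndJoin().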
gpl-2.0
MasX/kille
src/kille.py
1
20357
#!/usr/bin/env python # -*- coding: utf-8 -*- """ """ import rospy import sys import cv2 import cv2.cv as cv from sensor_msgs.msg import Image from std_msgs.msg import String from cv_bridge import CvBridge, CvBridgeError import numpy as np import codecs import collections import random from sklearn import svm, feature_extraction import recognize import pickle import time import matplotlib.pyplot as plt class Recognizer(): def __init__(self): self.start_time = time.time() self.demo_on = False self.evaluation_mode = False self.return_average = False self.evaluation_frames = [] self.node_name = "kille_recognizer" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) # Create the OpenCV display window for the RGB image self.cv_window_name = self.node_name cv.NamedWindow("RGB Image", cv.CV_WINDOW_NORMAL) cv.MoveWindow("RGB Image", 25, 75) # And one for the depth image cv.NamedWindow("Depth Image", cv.CV_WINDOW_NORMAL) cv.MoveWindow("Depth Image", 25, 350) self.known_objects = [] self.sift = cv2.SIFT() self.taken_actions = [] self.current_depth = None self.current_stage = None self.bridge = CvBridge() self.pub = rospy.Publisher("DMsubscriber", String) self.rate = rospy.Rate(10) # 10hz self.last_recognized = None self.linclassifier = svm.LinearSVC() self.learned_relations = [] self.region_of_interest = (1, 1000) self.recognitions = [] self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image, self.image_callback) self.depth_sub = rospy.Subscriber("/camera/depth/image_raw", Image, self.depth_callback) rospy.Subscriber("DMpublisher", String, self.receive, queue_size=1) try: self.learned_relations = pickle.load(open("saved_locations", "rb")) self.train_locations() # learned_relations is the list of relations in memory before training the classifier except: print "failed loading saved relations" try: # the features are not serializable, hence this work-around to load them from a file pickled_objects = pickle.load(open("saved_objects", "rb")) self.known_objects = [] for obj in pickled_objects: kps = [] for point in obj[1]: temp_kp = cv2.KeyPoint(x=point[0][0], y=point[0][1], _size=point[1], _angle=point[2], _response=point[3], _octave=point[4], _class_id=point[5]) kps.append(temp_kp) self.known_objects.append((obj[0], kps, obj[2])) except: print "couldn't load learned objects file" rospy.loginfo("Waiting for image topics...") # this function is called for every rgb frame def image_callback(self, ros_image): # Use cv_bridge() to convert the ROS image to OpenCV format depth_frame = self.current_depth try: frame = np.array(self.bridge.imgmsg_to_cv2(ros_image, "bgr8"), dtype=np.uint8) except CvBridgeError, e: print e display_image = self.process_image(frame, depth_frame) cv2.imshow("RGB Image", display_image) cv.WaitKey(5) # HighGui (of which imshow() is a function of) needs waitkey so it has time to process its event loop # see what the next thing to do is, e.g. if a(n external) order has come in # this is inefficient and creates a small lag between depth frame processing and rgb frame processing # however, I have not found a reliable way to circumvent this, due to the necessity to catch the rgb and d frames # through the 'image_callback()' function. 
if self.current_stage is not None: self.taken_actions.append((self.current_stage, time.time() - self.start_time)) method = self.current_stage[0] if method == self.send_scores: if self.evaluation_mode: if len(self.evaluation_frames) == 10: arguments = [display_image] + [depth_frame] + self.current_stage[1:] method(*arguments) self.current_stage = None self.evaluation_frames = [] else: self.evaluation_frames.append((display_image, depth_frame)) else: arguments = [display_image] + [depth_frame] + self.current_stage[1:] method(*arguments) self.current_stage = None else: arguments = [display_image] + self.current_stage[1:] method(*arguments) self.current_stage = None # this one is called for every depth-frame (which is in gray-scale) def depth_callback(self, ros_image): try: # The depth image is a single-channel float32 image depth_image = self.bridge.imgmsg_to_cv2(ros_image, "16UC1") except CvBridgeError, e: print e depth_array = np.array(depth_image, dtype=np.float32) depth_array = np.roll(depth_array, -15) self.current_depth = np.copy(depth_array) max_depth = self.region_of_interest[1] depth_array[depth_array < self.region_of_interest[0]] = max_depth depth_array[depth_array > self.region_of_interest[1]] = max_depth # Normalize the depth image to fall between 0 (black) and 1 (white) cv2.normalize(depth_array, depth_array, 0, 1, cv2.NORM_MINMAX) # Process the depth image depth_display_image = self.process_depth_image(depth_array) # Display the result cv2.imshow("Depth Image", depth_display_image) def process_depth_image(self, frame): return frame # return self.good_Features(frame) def process_image(self, frame, depth_frame): # set every pixel that is outside of the RoI to white (255,255,255). frame[np.tile(depth_frame > self.region_of_interest[1], (1, 1, 3))] = 255 frame[np.tile(depth_frame < self.region_of_interest[0], (1, 1, 3))] = 255 # show SIFT features if self.demo_on: kp, des = self.sift.detectAndCompute(frame, None) #frame = cv2.drawKeypoints(frame, kp, color=(0, 255, 0), flags=0) frame = cv2.drawKeypoints(frame,kp,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) return frame def learn_new_object(self, frame, name): kp, des = self.sift.detectAndCompute(frame, None) self.known_objects.append((name, kp, des)) self.send("learned:%s" % name) def learn_object_location(self, frame, lst): print "learning relation" name, name2, location = lst recognized = recognize.recognize_object(frame, self.current_depth, self.known_objects)[2] if recognized: recognized = self.get_unique_recognized(recognized) first_loc = None second_loc = None for rec_item in recognized: if rec_item[1] == name: first_loc = rec_item[2] elif rec_item[1] == name2: second_loc = rec_item[2] if first_loc and second_loc: self.learn_relation(location, first_loc, second_loc) else: self.send("failed to recognize objects") else: self.send("failed to recognize objects") @staticmethod def get_unique_recognized(self, recognized): unique = [] unique_names_only = [] for rec_object in recognized: if rec_object[1] not in unique_names_only: unique_names_only.append(rec_object[1]) unique.append(rec_object) return unique def learn_relation(self, location, coord1, coord2): print coord1 print coord2 relative_coord = {'x': (coord1[0] - coord2[0]), 'y': (coord1[1] - coord2[1]), 'z': coord1[2] - coord2[2]} print relative_coord self.learned_relations.append((relative_coord, location)) self.send(self.train_locations()) def train_locations(self): x1 = [x[0] for x in self.learned_relations] y1 = [x[1] for x in self.learned_relations] if len(set(y1)) > 
1: x1e = feature_extraction.DictVectorizer().fit_transform(x1) self.linclassifier.fit(x1e, y1) return "relation:%s" % x[-1:] else: return "relation:%s" % x[-1:] def get_relation(self, coord1, coord2): if len(set([x[1] for x in self.learned_relations])) < 2: return "unknown" relative_coord = {'x': (coord1[0] - coord2[0]), 'y': (coord1[1] - coord2[1]), 'z': coord1[2] - coord2[2]} print relative_coord print len(self.learned_relations) return self.linclassifier.predict(feature_extraction.DictVectorizer().fit_transform([relative_coord]))[0] def reinforce_object(self, frame): kp, des = self.sift.detectAndCompute(frame, None) self.known_objects.append((self.last_recognized[0], kp, des)) self.send("reinforced:%s" % self.last_recognized[0]) def unlearn_object(self, frame): name = self.known_objects[-1][0] self.known_objects = self.known_objects[:-1] self.send("unlearned:%s" % name) def unlearn_specific(self, frame, name): # diff_len is not used currently old_len = len(self.known_objects) self.known_objects = [x for x in self.known_objects if x[0] != name] diff_len = len(self.known_objects) - old_len self.send("removed:%s" % diff_len) def correct_last_object(self, frame, name): if len(self.known_objects) > 0: old_name = self.known_objects[-1][0] self.known_objects[-1] = (name, self.known_objects[-1][1], self.known_objects[-1][2]) self.send("changed:%s:%s" % (old_name, name)) else: self.send("I do not know any objects") # this method sends the scores back to ROS, in my implementation to ROSJava def send_scores(self, frame, depth_frame, plurality): if self.evaluation_mode: list_of_relations = [] list_of_scores = [] for frm, depth_frm in self.evaluation_frames: kp, des, scores = recognize.recognize_object(frm, depth_frm, self.known_objects) if scores: relation = "" if len(scores) > 1 and plurality == "plural": if not any([np.isnan(x) for x in scores[0][2]]) and not any( [np.isnan(y) for y in scores[1][2]]): relation += self.get_relation(scores[0][2], scores[1][2]) list_of_relations.append(relation) list_of_scores.append(scores) if not list_of_relations: self.send("detected none") return "detected none" relation = collections.Counter(list_of_relations).most_common(1)[0][0] combined_scores = self.combine_trials(list_of_scores) self.send(self.send_combined_entries(combined_scores, relation, plurality), plurality) if combined_scores: self.last_recognized = (combined_scores[0][1], kp, des) print [(x[0][1], x[0][0]) for x in list_of_scores] return combined_scores else: kp, des, scores = recognize.recognize_object(frame, depth_frame, self.known_objects) if scores: relation = "" combined_scores = self.combine_entries(scores) if len(combined_scores) > 1 and plurality == "plural": relation += self.get_relation(combined_scores[0][2], combined_scores[1][2]) if plurality == "plural": self.send(self.send_combined_entries(combined_scores, relation, plurality)) else: self.send(self.send_combined_entries([combined_scores[0]], relation, plurality)) self.last_recognized = (combined_scores[0][1], kp, des) return combined_scores else: self.send("detected none") return "detected none" # when different frames had to be tested, this method combines the results def combine_trials(self, lst): unique_items = [] for scores in lst: for x in scores: if x[1] not in unique_items: unique_items.append(x[1]) scored_list = [] scored_list2 = [] for y in unique_items: total = [] total2 = [] total_coord = [] lendes = [] lendes2 = [] for trial in lst: for x in trial: if x[1] == y: total.append(x[0]) total2.append(x[4]) lendes.append(x[5]) 
lendes2.append(x[6]) total_coord.append(x[2]) average_coord = (int(sum([z[0] for z in total_coord]) / len(total_coord)), int(sum([z[1] for z in total_coord]) / len(total_coord)), int(sum([z[2] for z in total_coord]) / len(total_coord))) if self.return_average: scored_list.append(((sum(total) / len(total)), y, average_coord)) else: scored_list.append((max(total), y, average_coord)) scored_list2.append((y, max(total2), lendes[total2.index(max(total2))], lendes2[total2.index(max(total2))], average_coord)) to_print = "" to_location = [] for i in sorted(scored_list2, key=lambda x: x[1], reverse=True): print "%s %s" % (i[0],i[1]) to_location.append((i[0],i[4])) to_print += ",%s %s %s" % (i[1],i[2],i[3]) #print to_print print "%s" % [x[0] for x in to_location] print "%s, %s, %s, %s"% (to_location[0][0], to_location[1][0], to_location[0][1], to_location[1][1]) print "%s, %s, %s" % (to_location[0][0], to_location[1][0], self.get_relation(to_location[0][1],to_location[1][1])) return sorted(scored_list2) #return sorted(scored_list, reverse=True) def combine_entries(self, lst): unique_items = [] for x in lst: if x[1] not in unique_items: unique_items.append(x[1]) scored_list = [] for y in unique_items: total = [] total_coord = [] for x in lst: if x[1] == y: total.append(x[0]) total_coord.append(x[2]) average_coord = (int(sum([z[0] for z in total_coord]) / len(total_coord)), int(sum([z[1] for z in total_coord]) / len(total_coord)), int(sum([z[2] for z in total_coord]) / len(total_coord))) if self.return_average: scored_list.append(((sum(total) / len(total)), y, average_coord)) else: scored_list.append((max(total), y, average_coord)) return sorted(scored_list, reverse=True) @staticmethod def send_combined_entries(self, lst, relation, plurality): if plurality == "plural": to_send = "detectedloc:%s:%s:" % (len(lst), relation) else: to_send = "detected:%s:%s:" % (len(lst), relation) for entry in lst: to_send += '%s,%s:' % (entry[1], entry[0]) return to_send # called at shutdown. Saves learned stuff to files and gracefully exits. def cleanup(self): pickle.dump(self.learned_relations, open("saved_locations", "wb")) pickleable_known_objects = [] for object in self.known_objects: name, kp, des = object kp_list = [] for point in kp: temp_kp = (point.pt, point.size, point.angle, point.response, point.octave, point.class_id) kp_list.append(temp_kp) pickleable_known_objects.append((name, kp_list, des)) pickle.dump(pickleable_known_objects, open("saved_objects", "wb")) if self.taken_actions: with open("taken_actions", "a") as write_taken_actions: for action in self.taken_actions: write_taken_actions.write("%s : %s : %s\n" % (action[0][0].__name__, action[0][1:], action[1])) print "Shut down." 
cv2.destroyAllWindows() def send(self, data): extra = " : " if self.has_enough_knowledge(): extra = self.has_enough_knowledge() elif len(set([x[1] for x in self.learned_relations])) < 2 and len(set([x[0] for x in self.known_objects] > 1)): extra = "relation:more" print "sending: %s~%s" % (data, extra) self.pub.publish(data + "~" + extra) self.rate.sleep() # tests wether the system considers itself to know enough about objects def has_enough_knowledge(self): objects_to_query = [x[0] for x in self.known_objects] if self.current_stage[0] == self.learn_new_object: objects_to_query = [y for y in objects_to_query if y != self.current_stage[1]] objects_freq = collections.Counter(objects_to_query) for x in objects_freq: if objects_freq[x] < 3: if random.randrange(2) == 1: return 'object:%s' % x return " " def reinforce_relation(self, data): # TODO ? needs names of the objects ideally, which are not stored anywhere at the moment name = self.learned_relations[-1][1] def unlearn_relation(self, data): name = self.learned_relations[-1][1] self.learned_relations = self.learned_relations[:-1] self.train_locations() self.send("unlearned-last-relation:%s" % name) def unlearn_specific_relation(self, frame, name): # diff_len is not used currently old_len = len(self.learned_relations) self.known_objects = [x for x in self.learned_relations if x[0] != name] self.train_locations() diff_len = len(self.known_objects) - old_len self.send("removed-relation:%s" % diff_len) # handles input from rosjava (or substituting dialogue interface def receive(self, msg): data = codecs.decode(msg.data, 'utf-8').split(":") print "received:", data if data[0] == "learn": print 'trying to learn ' + data[1] self.current_stage = [self.learn_new_object, data[1]] elif data[0] == "what is this?": self.current_stage = [self.send_scores, "singular"] elif data[0] == "what are these?": self.current_stage = [self.send_scores, "plural"] elif data[0] == "last-object": self.current_stage = [self.reinforce_object] elif data[0] == "unlearn-last-object": self.current_stage = [self.unlearn_object] elif data[0] == "unlearn-object": self.current_stage = [self.unlearn_specific, data[1]] elif data[0] == "last-relation": self.current_stage = [self.reinforce_relation] elif data[0] == "unlearn-last-relation": self.current_stage = [self.unlearn_relation] elif data[0] == "unlearn-relation": self.current_stage = [self.unlearn_specific_relation, data[1]] elif data[0] == "relearn": self.current_stage = [self.correct_last_object, data[1]] elif data[0] == "relation": self.current_stage = [self.learn_object_location, (data[1], data[2], data[3])] else: print u"I don't understand", data def main(args): print("kille och rosdial") try: Recognizer() rospy.spin() except KeyboardInterrupt: print "Shut down." cv.DestroyAllWindows() def reduce_method(m): return (getattr, (m.__self__, m.__func__.__name__)) if __name__ == '__main__': main(sys.argv)
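
# Hedged sketch (an illustrative addition, not part of the original node): the
# spatial-relation learning above boils down to a DictVectorizer + LinearSVC
# pattern over relative {'x', 'y', 'z'} offsets. The standalone version below
# keeps the vectorizer fitted at training time and reuses it for prediction so
# that the feature columns stay aligned; it needs at least two distinct labels.
def _relation_classifier_sketch(relations, query_offset):
    """relations: list of ({'x': dx, 'y': dy, 'z': dz}, label) pairs."""
    vectorizer = feature_extraction.DictVectorizer()
    features = vectorizer.fit_transform([rel for rel, _ in relations])
    labels = [label for _, label in relations]
    classifier = svm.LinearSVC().fit(features, labels)
    return classifier.predict(vectorizer.transform([query_offset]))[0]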
bsd-2-clause
ningchi/scikit-learn
sklearn/svm/base.py
12
33517
from __future__ import print_function import numpy as np import scipy.sparse as sp import warnings from abc import ABCMeta, abstractmethod from . import libsvm, liblinear from . import libsvm_sparse from ..base import BaseEstimator, ClassifierMixin from ..preprocessing import LabelEncoder from ..utils import check_array, check_random_state, column_or_1d from ..utils import ConvergenceWarning, compute_class_weight, deprecated from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..externals import six LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr'] def _one_vs_one_coef(dual_coef, n_support, support_vectors): """Generate primal coefficients from dual coefficients for the one-vs-one multi class LibSVM in the case of a linear kernel.""" # get 1vs1 weights for all n*(n-1) classifiers. # this is somewhat messy. # shape of dual_coef_ is nSV * (n_classes -1) # see docs for details n_class = dual_coef.shape[0] + 1 # XXX we could do preallocation of coef but # would have to take care in the sparse case coef = [] sv_locs = np.cumsum(np.hstack([[0], n_support])) for class1 in range(n_class): # SVs for class1: sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :] for class2 in range(class1 + 1, n_class): # SVs for class1: sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :] # dual coef for class1 SVs: alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]] # dual coef for class2 SVs: alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]] # build weight for class1 vs class2 coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2)) return coef class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for estimators that use libsvm as backing library This implements support vector machine classification and regression. Parameter documentation is in the derived `SVC` class. """ # The order of these must match the integer values in LibSVM. # XXX These are actually the same in the dense case. Need to factor # this out. _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"] @abstractmethod def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu, epsilon, shrinking, probability, cache_size, class_weight, verbose, max_iter, random_state): if impl not in LIBSVM_IMPL: # pragma: no cover raise ValueError("impl should be one of %s, %s was given" % ( LIBSVM_IMPL, impl)) self._impl = impl self.kernel = kernel self.degree = degree self.gamma = gamma self.coef0 = coef0 self.tol = tol self.C = C self.nu = nu self.epsilon = epsilon self.shrinking = shrinking self.probability = probability self.cache_size = cache_size self.class_weight = class_weight self.verbose = verbose self.max_iter = max_iter self.random_state = random_state @property def _pairwise(self): # Used by cross_val_score. kernel = self.kernel return kernel == "precomputed" or callable(kernel) def fit(self, X, y, sample_weight=None): """Fit the SVM model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. For kernel="precomputed", the expected shape of X is (n_samples, n_samples). y : array-like, shape (n_samples,) Target values (class labels in classification, real numbers in regression) sample_weight : array-like, shape (n_samples,) Per-sample weights. Rescale C per sample. 
Higher weights force the classifier to put more emphasis on these points. Returns ------- self : object Returns self. Notes ------ If X and y are not C-ordered and contiguous arrays of np.float64 and X is not a scipy.sparse.csr_matrix, X and/or y may be copied. If X is a dense array, then the other methods will not support sparse matrices as input. """ rnd = check_random_state(self.random_state) sparse = sp.isspmatrix(X) if sparse and self.kernel == "precomputed": raise TypeError("Sparse precomputed kernels are not supported.") self._sparse = sparse and not callable(self.kernel) X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C') y = self._validate_targets(y) sample_weight = np.asarray([] if sample_weight is None else sample_weight, dtype=np.float64) solver_type = LIBSVM_IMPL.index(self._impl) # input validation if solver_type != 2 and X.shape[0] != y.shape[0]: raise ValueError("X and y have incompatible shapes.\n" + "X has %s samples, but y has %s." % (X.shape[0], y.shape[0])) if self.kernel == "precomputed" and X.shape[0] != X.shape[1]: raise ValueError("X.shape[0] should be equal to X.shape[1]") if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]: raise ValueError("sample_weight and X have incompatible shapes: " "%r vs %r\n" "Note: Sparse matrices cannot be indexed w/" "boolean masks (use `indices=True` in CV)." % (sample_weight.shape, X.shape)) if (self.kernel in ['poly', 'rbf']) and (self.gamma == 0): # if custom gamma is not provided ... self._gamma = 1.0 / X.shape[1] else: self._gamma = self.gamma kernel = self.kernel if callable(kernel): kernel = 'precomputed' fit = self._sparse_fit if self._sparse else self._dense_fit if self.verbose: # pragma: no cover print('[LibSVM]', end='') seed = rnd.randint(np.iinfo('i').max) fit(X, y, sample_weight, solver_type, kernel, random_seed=seed) # see comment on the other call to np.iinfo in this file self.shape_fit_ = X.shape # In binary case, we need to flip the sign of coef, intercept and # decision function. Use self._intercept_ and self._dual_coef_ internally. self._intercept_ = self.intercept_.copy() self._dual_coef_ = self.dual_coef_ if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2: self.intercept_ *= -1 self.dual_coef_ = -self.dual_coef_ return self def _validate_targets(self, y): """Validation of y and class_weight. Default implementation for SVR and one-class; overridden in BaseSVC. """ # XXX this is ugly. # Regression models should not have a class_weight_ attribute. self.class_weight_ = np.empty(0) return np.asarray(y, dtype=np.float64, order='C') def _warn_from_fit_status(self): assert self.fit_status_ in (0, 1) if self.fit_status_ == 1: warnings.warn('Solver terminated early (max_iter=%i).' ' Consider pre-processing your data with' ' StandardScaler or MinMaxScaler.' 
% self.max_iter, ConvergenceWarning) def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): if callable(self.kernel): # you must store a reference to X to compute the kernel in predict # TODO: add keyword copy to copy on demand self.__Xfit = X X = self._compute_kernel(X) if X.shape[0] != X.shape[1]: raise ValueError("X.shape[0] should be equal to X.shape[1]") libsvm.set_verbosity_wrap(self.verbose) # we don't pass **self.get_params() to allow subclasses to # add other parameters to __init__ self.support_, self.support_vectors_, self.n_support_, \ self.dual_coef_, self.intercept_, self.probA_, \ self.probB_, self.fit_status_ = libsvm.fit( X, y, svm_type=solver_type, sample_weight=sample_weight, class_weight=self.class_weight_, kernel=kernel, C=self.C, nu=self.nu, probability=self.probability, degree=self.degree, shrinking=self.shrinking, tol=self.tol, cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma, epsilon=self.epsilon, max_iter=self.max_iter, random_seed=random_seed) self._warn_from_fit_status() def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed): X.data = np.asarray(X.data, dtype=np.float64, order='C') X.sort_indices() kernel_type = self._sparse_kernels.index(kernel) libsvm_sparse.set_verbosity_wrap(self.verbose) self.support_, self.support_vectors_, dual_coef_data, \ self.intercept_, self.n_support_, \ self.probA_, self.probB_, self.fit_status_ = \ libsvm_sparse.libsvm_sparse_train( X.shape[1], X.data, X.indices, X.indptr, y, solver_type, kernel_type, self.degree, self._gamma, self.coef0, self.tol, self.C, self.class_weight_, sample_weight, self.nu, self.cache_size, self.epsilon, int(self.shrinking), int(self.probability), self.max_iter, random_seed) self._warn_from_fit_status() if hasattr(self, "classes_"): n_class = len(self.classes_) - 1 else: # regression n_class = 1 n_SV = self.support_vectors_.shape[0] dual_coef_indices = np.tile(np.arange(n_SV), n_class) dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class) self.dual_coef_ = sp.csr_matrix( (dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV)) def predict(self, X): """Perform regression on samples in X. For an one-class model, +1 or -1 is returned. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- y_pred : array, shape (n_samples,) """ X = self._validate_for_predict(X) predict = self._sparse_predict if self._sparse else self._dense_predict return predict(X) def _dense_predict(self, X): n_samples, n_features = X.shape X = self._compute_kernel(X) if X.ndim == 1: X = check_array(X, order='C') kernel = self.kernel if callable(self.kernel): kernel = 'precomputed' if X.shape[1] != self.shape_fit_[0]: raise ValueError("X.shape[1] = %d should be equal to %d, " "the number of samples at training time" % (X.shape[1], self.shape_fit_[0])) svm_type = LIBSVM_IMPL.index(self._impl) return libsvm.predict( X, self.support_, self.support_vectors_, self.n_support_, self._dual_coef_, self._intercept_, self.probA_, self.probB_, svm_type=svm_type, kernel=kernel, degree=self.degree, coef0=self.coef0, gamma=self._gamma, cache_size=self.cache_size) def _sparse_predict(self, X): # Precondition: X is a csr_matrix of dtype np.float64. 
kernel = self.kernel if callable(kernel): kernel = 'precomputed' kernel_type = self._sparse_kernels.index(kernel) C = 0.0 # C is not useful here return libsvm_sparse.libsvm_sparse_predict( X.data, X.indices, X.indptr, self.support_vectors_.data, self.support_vectors_.indices, self.support_vectors_.indptr, self._dual_coef_.data, self._intercept_, LIBSVM_IMPL.index(self._impl), kernel_type, self.degree, self._gamma, self.coef0, self.tol, C, self.class_weight_, self.nu, self.epsilon, self.shrinking, self.probability, self.n_support_, self.probA_, self.probB_) def _compute_kernel(self, X): """Return the data transformed by a callable kernel""" if callable(self.kernel): # in the case of precomputed kernel given as a function, we # have to compute explicitly the kernel matrix kernel = self.kernel(X, self.__Xfit) if sp.issparse(kernel): kernel = kernel.toarray() X = np.asarray(kernel, dtype=np.float64, order='C') return X @deprecated(" and will be removed in 0.19") def decision_function(self, X): """Distance of the samples X to the separating hyperplane. Parameters ---------- X : array-like, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train]. Returns ------- X : array-like, shape (n_samples, n_class * (n_class-1) / 2) Returns the decision function of the sample for each class in the model. """ return self._decision_function(X) def _decision_function(self, X): """Distance of the samples X to the separating hyperplane. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- X : array-like, shape (n_samples, n_class * (n_class-1) / 2) Returns the decision function of the sample for each class in the model. """ # NOTE: _validate_for_predict contains check for is_fitted # hence must be placed before any other attributes are used. X = self._validate_for_predict(X) X = self._compute_kernel(X) if self._sparse: dec_func = self._sparse_decision_function(X) else: dec_func = self._dense_decision_function(X) # In binary case, we need to flip the sign of coef, intercept and # decision function. 
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2: return -dec_func.ravel() return dec_func def _dense_decision_function(self, X): X = check_array(X, dtype=np.float64, order="C") kernel = self.kernel if callable(kernel): kernel = 'precomputed' return libsvm.decision_function( X, self.support_, self.support_vectors_, self.n_support_, self._dual_coef_, self._intercept_, self.probA_, self.probB_, svm_type=LIBSVM_IMPL.index(self._impl), kernel=kernel, degree=self.degree, cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma) def _sparse_decision_function(self, X): X.data = np.asarray(X.data, dtype=np.float64, order='C') kernel = self.kernel if hasattr(kernel, '__call__'): kernel = 'precomputed' kernel_type = self._sparse_kernels.index(kernel) return libsvm_sparse.libsvm_sparse_decision_function( X.data, X.indices, X.indptr, self.support_vectors_.data, self.support_vectors_.indices, self.support_vectors_.indptr, self._dual_coef_.data, self._intercept_, LIBSVM_IMPL.index(self._impl), kernel_type, self.degree, self._gamma, self.coef0, self.tol, self.C, self.class_weight_, self.nu, self.epsilon, self.shrinking, self.probability, self.n_support_, self.probA_, self.probB_) def _validate_for_predict(self, X): check_is_fitted(self, 'support_') X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C") if self._sparse and not sp.isspmatrix(X): X = sp.csr_matrix(X) if self._sparse: X.sort_indices() if sp.issparse(X) and not self._sparse and not callable(self.kernel): raise ValueError( "cannot use sparse input in %r trained on dense data" % type(self).__name__) n_samples, n_features = X.shape if self.kernel == "precomputed": if X.shape[1] != self.shape_fit_[0]: raise ValueError("X.shape[1] = %d should be equal to %d, " "the number of samples at training time" % (X.shape[1], self.shape_fit_[0])) elif n_features != self.shape_fit_[1]: raise ValueError("X.shape[1] = %d should be equal to %d, " "the number of features at training time" % (n_features, self.shape_fit_[1])) return X @property def coef_(self): if self.kernel != 'linear': raise ValueError('coef_ is only available when using a ' 'linear kernel') coef = self._get_coef() # coef_ being a read-only property, it's better to mark the value as # immutable to avoid hiding potential bugs for the unsuspecting user. if sp.issparse(coef): # sparse matrix do not have global flags coef.data.flags.writeable = False else: # regular dense array coef.flags.writeable = False return coef def _get_coef(self): return safe_sparse_dot(self._dual_coef_, self.support_vectors_) class BaseSVC(BaseLibSVM, ClassifierMixin): """ABC for LibSVM-based classifiers.""" def _validate_targets(self, y): y_ = column_or_1d(y, warn=True) cls, y = np.unique(y_, return_inverse=True) self.class_weight_ = compute_class_weight(self.class_weight, cls, y_) if len(cls) < 2: raise ValueError( "The number of classes has to be greater than one; got %d" % len(cls)) self.classes_ = cls return np.asarray(y, dtype=np.float64, order='C') def decision_function(self, X): """Distance of the samples X to the separating hyperplane. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- X : array-like, shape (n_samples, n_class * (n_class-1) / 2) Returns the decision function of the sample for each class in the model. """ return self._decision_function(X) def predict(self, X): """Perform classification on samples in X. For an one-class model, +1 or -1 is returned. 
Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns ------- y_pred : array, shape (n_samples,) Class labels for samples in X. """ y = super(BaseSVC, self).predict(X) return self.classes_.take(np.asarray(y, dtype=np.intp)) # Hacky way of getting predict_proba to raise an AttributeError when # probability=False using properties. Do not use this in new code; when # probabilities are not available depending on a setting, introduce two # estimators. def _check_proba(self): if not self.probability: raise AttributeError("predict_proba is not available when" " probability=%r" % self.probability) if self._impl not in ('c_svc', 'nu_svc'): raise AttributeError("predict_proba only implemented for SVC" " and NuSVC") @property def predict_proba(self): """Compute probabilities of possible outcomes for samples in X. The model need to have probability information computed at training time: fit with attribute `probability` set to True. Parameters ---------- X : array-like, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns ------- T : array-like, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. Notes ----- The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets. """ self._check_proba() return self._predict_proba def _predict_proba(self, X): X = self._validate_for_predict(X) pred_proba = (self._sparse_predict_proba if self._sparse else self._dense_predict_proba) return pred_proba(X) @property def predict_log_proba(self): """Compute log probabilities of possible outcomes for samples in X. The model need to have probability information computed at training time: fit with attribute `probability` set to True. Parameters ---------- X : array-like, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probabilities of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. Notes ----- The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets. 
""" self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) def _dense_predict_proba(self, X): X = self._compute_kernel(X) kernel = self.kernel if callable(kernel): kernel = 'precomputed' svm_type = LIBSVM_IMPL.index(self._impl) pprob = libsvm.predict_proba( X, self.support_, self.support_vectors_, self.n_support_, self._dual_coef_, self._intercept_, self.probA_, self.probB_, svm_type=svm_type, kernel=kernel, degree=self.degree, cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma) return pprob def _sparse_predict_proba(self, X): X.data = np.asarray(X.data, dtype=np.float64, order='C') kernel = self.kernel if callable(kernel): kernel = 'precomputed' kernel_type = self._sparse_kernels.index(kernel) return libsvm_sparse.libsvm_sparse_predict_proba( X.data, X.indices, X.indptr, self.support_vectors_.data, self.support_vectors_.indices, self.support_vectors_.indptr, self._dual_coef_.data, self._intercept_, LIBSVM_IMPL.index(self._impl), kernel_type, self.degree, self._gamma, self.coef0, self.tol, self.C, self.class_weight_, self.nu, self.epsilon, self.shrinking, self.probability, self.n_support_, self.probA_, self.probB_) def _get_coef(self): if self.dual_coef_.shape[0] == 1: # binary classifier coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_) else: # 1vs1 classifier coef = _one_vs_one_coef(self.dual_coef_, self.n_support_, self.support_vectors_) if sp.issparse(coef[0]): coef = sp.vstack(coef).tocsr() else: coef = np.vstack(coef) return coef def _get_liblinear_solver_type(multi_class, penalty, loss, dual): """Find the liblinear magic number for the solver. This number depends on the values of the following attributes: - multi_class - penalty - loss - dual The same number is also internally used by LibLinear to determine which solver to use. 
""" # nested dicts containing level 1: available loss functions, # level2: available penalties for the given loss functin, # level3: wether the dual solver is available for the specified # combination of loss function and penalty _solver_type_dict = { 'logistic_regression': { 'l1': {False: 6}, 'l2': {False: 0, True: 7}}, 'hinge': { 'l2': {True: 3}}, 'squared_hinge': { 'l1': {False: 5}, 'l2': {False: 2, True: 1}}, 'epsilon_insensitive': { 'l2': {True: 13}}, 'squared_epsilon_insensitive': { 'l2': {False: 11, True: 12}}, 'crammer_singer': 4 } if multi_class == 'crammer_singer': return _solver_type_dict[multi_class] elif multi_class != 'ovr': raise ValueError("`multi_class` must be one of `ovr`, " "`crammer_singer`, got %r" % multi_class) # FIXME loss.lower() --> loss in 0.18 _solver_pen = _solver_type_dict.get(loss.lower(), None) if _solver_pen is None: error_string = ("loss='%s' is not supported" % loss) else: # FIME penalty.lower() --> penalty in 0.18 _solver_dual = _solver_pen.get(penalty.lower(), None) if _solver_dual is None: error_string = ("The combination of penalty='%s'" "and loss='%s' is not supported" % (penalty, loss)) else: solver_num = _solver_dual.get(dual, None) if solver_num is None: error_string = ("loss='%s' and penalty='%s'" "are not supported when dual=%s" % (penalty, loss, dual)) else: return solver_num raise ValueError('Unsupported set of arguments: %s, ' 'Parameters: penalty=%r, loss=%r, dual=%r' % (error_string, penalty, loss, dual)) def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight, penalty, dual, verbose, max_iter, tol, random_state=None, multi_class='ovr', loss='logistic_regression', epsilon=0.1): """Used by Logistic Regression (and CV) and LinearSVC. Preprocessing is done in this function before supplying it to liblinear. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X C : float Inverse of cross-validation parameter. Lower the C, the more the penalization. fit_intercept : bool Whether or not to fit the intercept, that is to add a intercept term to the decision function. intercept_scaling : float LibLinear internally penalizes the intercept and this term is subject to regularization just like the other terms of the feature vector. In order to avoid this, one should increase the intercept_scaling. such that the feature vector becomes [x, intercept_scaling]. class_weight : {dict, 'auto'}, optional Weight assigned to each class. If class_weight provided is 'auto', then the weights provided are inverses of the frequency in the target vector. penalty : str, {'l1', 'l2'} The norm of the penalty used in regularization. dual : bool Dual or primal formulation, verbose : int Set verbose to any positive number for verbosity. max_iter : int Number of iterations. tol : float Stopping condition. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. multi_class : str, {'ovr', 'crammer_singer'} `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer` optimizes a joint objective over all classes. While `crammer_singer` is interesting from an theoretical perspective as it is consistent it is seldom used in practice and rarely leads to better accuracy and is more expensive to compute. 
If `crammer_singer` is chosen, the options loss, penalty and dual will be ignored. loss : str, {'logistic_regression', 'hinge', 'squared_hinge', 'epsilon_insensitive', 'squared_epsilon_insensitive} The loss function used to fit the model. epsilon : float, optional (default=0.1) Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y. If unsure, set epsilon=0. Returns ------- coef_ : ndarray, shape (n_features, n_features + 1) The coefficent vector got by minimizing the objective function. intercept_ : float The intercept term added to the vector. n_iter_ : int Maximum number of iterations run across all classes. """ # FIXME Remove case insensitivity in 0.18 --------------------- loss_l, penalty_l = loss.lower(), penalty.lower() msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the uppercase notation will be removed in %s") if (not loss.islower()) and loss_l not in ('l1', 'l2'): warnings.warn(msg % (loss, loss_l, "0.18"), DeprecationWarning) if not penalty.islower(): warnings.warn(msg.replace("loss", "penalty") % (penalty, penalty_l, "0.18"), DeprecationWarning) # ------------------------------------------------------------- # FIXME loss_l --> loss in 0.18 if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']: enc = LabelEncoder() y_ind = enc.fit_transform(y) classes_ = enc.classes_ if len(classes_) < 2: raise ValueError("This solver needs samples of at least 2 classes" " in the data, but the data contains only one" " class: %r" % classes_[0]) class_weight_ = compute_class_weight(class_weight, classes_, y) else: class_weight_ = np.empty(0, dtype=np.float) y_ind = y liblinear.set_verbosity_wrap(verbose) rnd = check_random_state(random_state) if verbose: print('[LibLinear]', end='') # LinearSVC breaks when intercept_scaling is <= 0 bias = -1.0 if fit_intercept: if intercept_scaling <= 0: raise ValueError("Intercept scaling is %r but needs to be greater than 0." " To disable fitting an intercept," " set fit_intercept=False." % intercept_scaling) else: bias = intercept_scaling libsvm.set_verbosity_wrap(verbose) libsvm_sparse.set_verbosity_wrap(verbose) liblinear.set_verbosity_wrap(verbose) # LibLinear wants targets as doubles, even for classification y_ind = np.asarray(y_ind, dtype=np.float64).ravel() solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual) raw_coef_, n_iter_ = liblinear.train_wrap( X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C, class_weight_, max_iter, rnd.randint(np.iinfo('i').max), epsilon) # Regarding rnd.randint(..) in the above signature: # seed for srand in range [0..INT_MAX); due to limitations in Numpy # on 32-bit platforms, we can't get to the UINT_MAX limit that # srand supports n_iter_ = max(n_iter_) if n_iter_ >= max_iter and verbose > 0: warnings.warn("Liblinear failed to converge, increase " "the number of iterations.", ConvergenceWarning) if fit_intercept: coef_ = raw_coef_[:, :-1] intercept_ = intercept_scaling * raw_coef_[:, -1] else: coef_ = raw_coef_ intercept_ = 0. return coef_, intercept_, n_iter_
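A hedged aside, not part of the library source above: a minimal runnable sketch of the binary sign convention that _decision_function handles (the "flip the sign" step). It assumes scikit-learn is installed and uses a made-up two-class toy set; positive decision values correspond to classes_[1], matching predict().

import numpy as np
from sklearn.svm import SVC

X = np.array([[-2.0, -1.0], [-1.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([0, 0, 1, 1])

clf = SVC(kernel='linear').fit(X, y)
scores = clf.decision_function(X)            # shape (n_samples,) in the binary case
pred = clf.classes_[(scores > 0).astype(int)]
assert np.array_equal(pred, clf.predict(X))  # positive score -> classes_[1]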
bsd-3-clause
DistrictDataLabs/yellowbrick
yellowbrick/datasets/path.py
1
7888
# yellowbrick.datasets.path # Helper functions for looking up dataset paths. # # Author: Benjamin Bengfort # Created: Thu Jul 26 14:10:51 2018 -0400 # # Copyright (C) 2018 The scikit-yb developers # For license information, see LICENSE.txt # # ID: path.py [7082742] benjamin@bengfort.com $ """ Helper functions for looking up dataset paths. """ ########################################################################## ## Imports ########################################################################## import os import shutil from .signature import sha256sum from yellowbrick.exceptions import DatasetsError ########################################################################## ## Fixtures ########################################################################## FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures") ########################################################################## ## Dataset path utilities ########################################################################## def get_data_home(path=None): """ Return the path of the Yellowbrick data directory. This folder is used by dataset loaders to avoid downloading data several times. By default, this folder is colocated with the code in the install directory so that data shipped with the package can be easily located. Alternatively it can be set by the ``$YELLOWBRICK_DATA`` environment variable, or programmatically by giving a folder path. Note that the ``'~'`` symbol is expanded to the user home directory, and environment variables are also expanded when resolving the path. """ if path is None: path = os.environ.get("YELLOWBRICK_DATA", FIXTURES) path = os.path.expanduser(path) path = os.path.expandvars(path) if not os.path.exists(path): os.makedirs(path) return path def find_dataset_path(dataset, data_home=None, fname=None, ext=".csv.gz", raises=True): """ Looks up the path to the dataset specified in the data home directory, which is found using the ``get_data_home`` function. By default data home is colocated with the code, but can be modified with the YELLOWBRICK_DATA environment variable, or passing in a different directory. The file returned will be by default, the name of the dataset in compressed CSV format. Other files and extensions can be passed in to locate other data types or auxilliary files. If the dataset is not found a ``DatasetsError`` is raised by default. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. fname : str, optional The filename to look up in the dataset path, by default it will be the name of the dataset. The fname must include an extension. ext : str, default: ".csv.gz" The extension of the data to look up in the dataset path, if the fname is specified then the ext parameter is ignored. If ext is None then the directory of the dataset will be returned. raises : bool, default: True If the path does not exist, raises a DatasetsError unless this flag is set to False, at which point None is returned (e.g. for checking if the path exists or not). Returns ------- path : str or None A path to the requested file, guaranteed to exist if an exception is not raised during processing of the request (unless None is returned). raises : DatasetsError If raise is True and the path does not exist, raises a DatasetsError. 
""" # Figure out the root directory of the datasets data_home = get_data_home(data_home) # Figure out the relative path to the dataset if fname is None: if ext is None: path = os.path.join(data_home, dataset) else: path = os.path.join(data_home, dataset, "{}{}".format(dataset, ext)) else: path = os.path.join(data_home, dataset, fname) # Determine if the path exists if not os.path.exists(path): # Suppress exceptions if required if not raises: return None raise DatasetsError( ("could not find dataset at {} - does it need to be downloaded?").format( path ) ) return path def dataset_exists(dataset, data_home=None): """ Checks to see if a directory with the name of the specified dataset exists in the data home directory, found with ``get_data_home``. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. Returns ------- exists : bool If a folder with the dataset name is in the data home directory. """ data_home = get_data_home(data_home) path = os.path.join(data_home, dataset) return os.path.exists(path) and os.path.isdir(path) def dataset_archive(dataset, signature, data_home=None, ext=".zip"): """ Checks to see if the dataset archive file exists in the data home directory, found with ``get_data_home``. By specifying the signature, this function also checks to see if the archive is the latest version by comparing the sha256sum of the local archive with the specified signature. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. signature : str The SHA 256 signature of the dataset, used to determine if the archive is the latest version of the dataset or not. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- exists : bool True if the dataset archive exists and is the latest version. """ data_home = get_data_home(data_home) path = os.path.join(data_home, dataset + ext) if os.path.exists(path) and os.path.isfile(path): return sha256sum(path) == signature return False def cleanup_dataset(dataset, data_home=None, ext=".zip"): """ Removes the dataset directory and archive file from the data home directory. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- removed : int The number of objects removed from data_home. """ removed = 0 data_home = get_data_home(data_home) # Paths to remove datadir = os.path.join(data_home, dataset) archive = os.path.join(data_home, dataset + ext) # Remove directory and contents if os.path.exists(datadir): shutil.rmtree(datadir) removed += 1 # Remove the archive file if os.path.exists(archive): os.remove(archive) removed += 1 return removed
apache-2.0
openconnectome/ndio
ndio/remote/metadata.py
2
7027
from __future__ import absolute_import import ndio import requests import os import numpy from io import BytesIO import zlib import tempfile import blosc import h5py from .remote_utils import remote_utils from .Remote import Remote from .errors import * import ndio.ramon as ramon from six.moves import range import six from functools import wraps try: import urllib.request as urllib2 except ImportError: import urllib2 class metadata(): """ Metadata helper class. """ def __init__(self, user_token): """ Initializes metadata. Arguments: user_token (str): Authentication token for user. """ self.remote_utils = remote_utils(user_token) # SECTION: # Metadata def get_public_tokens(self): """ Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens """ r = self.remote_utils.get_url(self.url() + "public_tokens/") return r.json() def get_public_datasets(self): """ NOTE: VERY SLOW! Get a list of public datasets. Different than public tokens! Arguments: None Returns: str[]: list of public datasets """ return list(self.get_public_datasets_and_tokens().keys()) def get_public_datasets_and_tokens(self): """ NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens] """ datasets = {} tokens = self.get_public_tokens() for t in tokens: dataset = self.get_token_dataset(t) if dataset in datasets: datasets[dataset].append(t) else: datasets[dataset] = [t] return datasets def get_token_dataset(self, token): """ Get the dataset for a given token. Arguments: token (str): The token to inspect Returns: str: The name of the dataset """ return self.get_proj_info(token)['dataset']['description'] def get_proj_info(self, token): """ Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info """ r = self.remote_utils.get_url(self.url() + "{}/info/".format(token)) return r.json() def get_metadata(self, token): """ An alias for get_proj_info. """ return self.get_proj_info(token) def get_channels(self, token): """ Wraps get_proj_info to return a dictionary of just the channels of a given project. Arguments: token (str): Token to return channels for Returns: JSON: dictionary of channels. """ return self.get_proj_info(token)['channels'] def get_image_size(self, token, resolution=0): """ Return the size of the volume (3D). Convenient for when you want to download the entirety of a dataset. Arguments: token (str): The token for which to find the dataset image bounds resolution (int : 0): The resolution at which to get image bounds. Defaults to 0, to get the largest area available. Returns: int[3]: The size of the bounds. Should == get_volume.shape Raises: RemoteDataNotFoundError: If the token is invalid, or if the metadata at that resolution is unavailable in projinfo. """ info = self.get_proj_info(token) res = str(resolution) if res not in info['dataset']['imagesize']: raise RemoteDataNotFoundError("Resolution " + res + " is not available.") return info['dataset']['imagesize'][str(resolution)] def set_metadata(self, token, data): """ Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. 
Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key. """ req = requests.post(self.meta_url("metadata/ocp/set/" + token), json=data, verify=False) if req.status_code != 200: raise RemoteDataUploadError( "Could not upload metadata: " + req.json()['message'] ) return req.json() def get_subvolumes(self, token): """ Return a list of subvolumes taken from LIMS, if available. Arguments: token (str): The token to read from in LIMS Returns: dict: or None if unavailable """ md = self.get_metadata(token)['metadata'] if 'subvolumes' in md: return md['subvolumes'] else: return None def add_subvolume(self, token, channel, secret, x_start, x_stop, y_start, y_stop, z_start, z_stop, resolution, title, notes): """ Adds a new subvolume to a token/channel. Arguments: token (str): The token to write to in LIMS channel (str): Channel to add in the subvolume. Can be `None` x_start (int): Start in x dimension x_stop (int): Stop in x dimension y_start (int): Start in y dimension y_stop (int): Stop in y dimension z_start (int): Start in z dimension z_stop (int): Stop in z dimension resolution (int): The resolution at which this subvolume is seen title (str): The title to set for the subvolume notes (str): Optional extra thoughts on the subvolume Returns: boolean: success """ md = self.get_metadata(token)['metadata'] if 'subvolumes' in md: subvols = md['subvolumes'] else: subvols = [] subvols.append({ 'token': token, 'channel': channel, 'x_start': x_start, 'x_stop': x_stop, 'y_start': y_start, 'y_stop': y_stop, 'z_start': z_start, 'z_stop': z_stop, 'resolution': resolution, 'title': title, 'notes': notes }) return self.set_metadata(token, { 'secret': secret, 'subvolumes': subvols })
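A hedged sketch of how this helper is meant to be used. The token string is a placeholder, a reachable ndstore/OCP server is assumed, and note that the methods above call self.url() and self.meta_url(), which are not defined in this excerpt, so the class evidently relies on helpers provided elsewhere in ndio.

from ndio.remote.metadata import metadata

md = metadata(user_token="my-secret-token")   # placeholder token
tokens = md.get_public_tokens()               # list of public tokens on the server
if tokens:
    info = md.get_proj_info(tokens[0])        # project info as parsed JSON
    print(tokens[0], "->", info['dataset']['description'])
    print("channels:", list(md.get_channels(tokens[0]).keys()))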
apache-2.0
ivankreso/stereo-vision
scripts/run_sgm_batch_sunando_sengupta.py
1
3239
#!/usr/bin/python import os import subprocess from os.path import isfile, join # KITTI 02 #start_num = 0 #stop_num = 4660 #frame_step = 1 #left_prefix = "/image_0/" #right_prefix = "/image_1/" #left_suffix = ".png" #right_suffix = ".png" #out_fname = "kitti_02_lst.xml" #start_num = 0 ##stop_num = 1100 #frame_step = 1 #left_suffix = "_10.png" #right_suffix = "_10.png" # ##data_folder = "/home/kivan/Projects/datasets/KITTI/sequences_gray/07/" ##data_folder = "/home/kivan/Projects/datasets/KITTI/dense_stereo/training/" ##stop_num = 193 #data_folder = "/home/kivan/Projects/datasets/KITTI/dense_stereo/testing/" #stop_num = 194 # ##left_prefix = "/colored_0/" ##right_prefix = "/colored_1/" ##out_folder = "/home/kivan/Projects/datasets/results/dense_stereo/spsstereo/kitti/data" ##binary_path = "/home/kivan/Projects/cv-stereo/build/spsstereo/release/spsstereo" # #left_prefix = "/image_0/" #right_prefix = "/image_1/" ##out_folder = "/home/kivan/Projects/datasets/results/dense_stereo/kitti/testing/our_sgm_5_60/data/" ##binary_path = "/home/kivan/Projects/cv-stereo/build/our_sgm/release/our_sgm" binary_path = "/home/kivan/source/cv-stereo/build/spsstereo/release/spsstereo" #binary_path = "/home/kivan/source/cv-stereo/build/sgm_single/release/sgm_single" #data_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Training_00/RGB/" #out_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Training_00/RGB/depth" #img_right_dir = "/home/kivan/datasets/KITTI/sequences_color/00/image_3/" #data_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Validation_07/RGB/" #out_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/Validation_07/RGB/depth" #img_right_dir = "/home/kivan/datasets/KITTI/sequences_color/07/image_3/" #data_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/sunando_sengupta/train/images/" #out_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/sunando_sengupta/train/depth/" data_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/sunando_sengupta/test/images/" out_folder = "/home/kivan/datasets/KITTI/segmentation/semantic_segmentation/sunando_sengupta/test/depth/" img_right_dir = "/home/kivan/datasets/KITTI/sequences_color/" if not os.path.exists(out_folder): os.makedirs(out_folder) else: print("WARNING: path exists - ", out_folder) left_dir = data_folder + "/left/" right_dir = data_folder + "/right/" filelist = [f for f in os.listdir(left_dir) if isfile(join(left_dir,f))] for filename in filelist: print(filename) #num_str = "%06d" % (i) #num_str = "%010d" % (i) img_left = left_dir + filename img_right = right_dir + filename right_src_img = img_right_dir + filename[0:2] + '/image_3/' + filename[3:] print(right_src_img) subprocess.call(["/bin/cp", right_src_img, img_right]) subprocess.call([binary_path, img_left, img_right, out_folder]) #subprocess.call([binary_path, img_left, img_right, out_folder, "5", "60"]) #cmd = binary_path + " " + img_left + " " + img_right + " " + out_folder #subprocess.call([cmd], shell=True) #ofile.write("</imagelist>\n</opencv_storage>") #ofile.close()
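A small illustration, with a made-up filename, of how the script above derives the right-camera source image: the first two characters of a left-image name are treated as the KITTI sequence number and the remainder as the frame name.

img_right_dir = "/home/kivan/datasets/KITTI/sequences_color/"   # as in the script
filename = "07_000123.png"                                      # hypothetical left-image name
right_src_img = img_right_dir + filename[0:2] + '/image_3/' + filename[3:]
print(right_src_img)
# -> /home/kivan/datasets/KITTI/sequences_color/07/image_3/000123.png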
bsd-3-clause
thientu/scikit-learn
examples/svm/plot_iris.py
223
3252
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
Fireblend/scikit-learn
examples/svm/plot_iris.py
223
3252
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
thientu/scikit-learn
examples/linear_model/plot_sgd_penalties.py
248
1563
""" ============== SGD: Penalties ============== Plot the contours of the three penalties. All of the above are supported by :class:`sklearn.linear_model.stochastic_gradient`. """ from __future__ import division print(__doc__) import numpy as np import matplotlib.pyplot as plt def l1(xs): return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs]) def l2(xs): return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs]) def el(xs, z): return np.array([(2 - 2 * x - 2 * z + 4 * x * z - (4 * z ** 2 - 8 * x * z ** 2 + 8 * x ** 2 * z ** 2 - 16 * x ** 2 * z ** 3 + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2) - 2 * x * z ** 2) / (2 - 4 * z) for x in xs]) def cross(ext): plt.plot([-ext, ext], [0, 0], "k-") plt.plot([0, 0], [-ext, ext], "k-") xs = np.linspace(0, 1, 100) alpha = 0.501 # 0.5 division throuh zero cross(1.2) plt.plot(xs, l1(xs), "r-", label="L1") plt.plot(xs, -1.0 * l1(xs), "r-") plt.plot(-1 * xs, l1(xs), "r-") plt.plot(-1 * xs, -1.0 * l1(xs), "r-") plt.plot(xs, l2(xs), "b-", label="L2") plt.plot(xs, -1.0 * l2(xs), "b-") plt.plot(-1 * xs, l2(xs), "b-") plt.plot(-1 * xs, -1.0 * l2(xs), "b-") plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net") plt.plot(xs, -1.0 * el(xs, alpha), "y-") plt.plot(-1 * xs, el(xs, alpha), "y-") plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-") plt.xlabel(r"$w_0$") plt.ylabel(r"$w_1$") plt.legend() plt.axis("equal") plt.show()
bsd-3-clause
thientu/scikit-learn
examples/model_selection/plot_confusion_matrix.py
243
2496
""" ================ Confusion matrix ================ Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class). This kind of normalization can be interesting in case of class imbalance to have a more visual interpretation of which class is being misclassified. Here the results are not as good as they could be as our choice for the regularization parameter C was not the best. In real life applications this parameter is usually chosen using :ref:`grid_search`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.cross_validation import train_test_split from sklearn.metrics import confusion_matrix # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01) y_pred = classifier.fit(X_train, y_train).predict(X_test) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, iris.target_names, rotation=45) plt.yticks(tick_marks, iris.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cm = confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print('Normalized confusion matrix') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix') plt.show()
bsd-3-clause
ningchi/scikit-learn
sklearn/linear_model/tests/test_ridge.py
14
20805
import numpy as np import scipy.sparse as sp from scipy import linalg from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.metrics import mean_squared_error from sklearn.metrics import make_scorer from sklearn.metrics import get_scorer from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.ridge import ridge_regression from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.ridge import _RidgeGCV from sklearn.linear_model.ridge import RidgeCV from sklearn.linear_model.ridge import RidgeClassifier from sklearn.linear_model.ridge import RidgeClassifierCV from sklearn.linear_model.ridge import _solve_cholesky from sklearn.linear_model.ridge import _solve_cholesky_kernel from sklearn.cross_validation import KFold diabetes = datasets.load_diabetes() X_diabetes, y_diabetes = diabetes.data, diabetes.target ind = np.arange(X_diabetes.shape[0]) rng = np.random.RandomState(0) rng.shuffle(ind) ind = ind[:200] X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind] iris = datasets.load_iris() X_iris = sp.csr_matrix(iris.data) y_iris = iris.target DENSE_FILTER = lambda X: X SPARSE_FILTER = lambda X: sp.csr_matrix(X) def test_ridge(): # Ridge regression convergence test using score # TODO: for this test to be robust, we should use a dataset instead # of np.random. rng = np.random.RandomState(0) alpha = 1.0 for solver in ("svd", "sparse_cg", "cholesky", "lsqr"): # With more samples than features n_samples, n_features = 6, 5 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=alpha, solver=solver) ridge.fit(X, y) assert_equal(ridge.coef_.shape, (X.shape[1], )) assert_greater(ridge.score(X, y), 0.47) if solver == "cholesky": # Currently the only solver to support sample_weight. ridge.fit(X, y, sample_weight=np.ones(n_samples)) assert_greater(ridge.score(X, y), 0.47) # With more features than samples n_samples, n_features = 5, 10 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=alpha, solver=solver) ridge.fit(X, y) assert_greater(ridge.score(X, y), .9) if solver == "cholesky": # Currently the only solver to support sample_weight. 
ridge.fit(X, y, sample_weight=np.ones(n_samples)) assert_greater(ridge.score(X, y), 0.9) def test_primal_dual_relationship(): y = y_diabetes.reshape(-1, 1) coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2]) K = np.dot(X_diabetes, X_diabetes.T) dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2]) coef2 = np.dot(X_diabetes.T, dual_coef).T assert_array_almost_equal(coef, coef2) def test_ridge_singular(): # test on a singular matrix rng = np.random.RandomState(0) n_samples, n_features = 6, 6 y = rng.randn(n_samples // 2) y = np.concatenate((y, y)) X = rng.randn(n_samples // 2, n_features) X = np.concatenate((X, X), axis=0) ridge = Ridge(alpha=0) ridge.fit(X, y) assert_greater(ridge.score(X, y), 0.9) def test_ridge_sample_weights(): rng = np.random.RandomState(0) for solver in ("cholesky", ): for n_samples, n_features in ((6, 5), (5, 10)): for alpha in (1.0, 1e-2): y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) sample_weight = 1 + rng.rand(n_samples) coefs = ridge_regression(X, y, alpha=alpha, sample_weight=sample_weight, solver=solver) # Sample weight can be implemented via a simple rescaling # for the square loss. coefs2 = ridge_regression( X * np.sqrt(sample_weight)[:, np.newaxis], y * np.sqrt(sample_weight), alpha=alpha, solver=solver) assert_array_almost_equal(coefs, coefs2) # Test for fit_intercept = True est = Ridge(alpha=alpha, solver=solver) est.fit(X, y, sample_weight=sample_weight) # Check using Newton's Method # Quadratic function should be solved in a single step. # Initialize sample_weight = np.sqrt(sample_weight) X_weighted = sample_weight[:, np.newaxis] * ( np.column_stack((np.ones(n_samples), X))) y_weighted = y * sample_weight # Gradient is (X*coef-y)*X + alpha*coef_[1:] # Remove coef since it is initialized to zero. grad = -np.dot(y_weighted, X_weighted) # Hessian is (X.T*X) + alpha*I except that the first # diagonal element should be zero, since there is no # penalization of intercept. diag = alpha * np.ones(n_features + 1) diag[0] = 0. hess = np.dot(X_weighted.T, X_weighted) hess.flat[::n_features + 2] += diag coef_ = - np.dot(linalg.inv(hess), grad) assert_almost_equal(coef_[0], est.intercept_) assert_array_almost_equal(coef_[1:], est.coef_) def test_ridge_shapes(): # Test shape of coef_ and intercept_ rng = np.random.RandomState(0) n_samples, n_features = 5, 10 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) Y1 = y[:, np.newaxis] Y = np.c_[y, 1 + y] ridge = Ridge() ridge.fit(X, y) assert_equal(ridge.coef_.shape, (n_features,)) assert_equal(ridge.intercept_.shape, ()) ridge.fit(X, Y1) assert_equal(ridge.coef_.shape, (1, n_features)) assert_equal(ridge.intercept_.shape, (1, )) ridge.fit(X, Y) assert_equal(ridge.coef_.shape, (2, n_features)) assert_equal(ridge.intercept_.shape, (2, )) def test_ridge_intercept(): # Test intercept with multiple targets GH issue #708 rng = np.random.RandomState(0) n_samples, n_features = 5, 10 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) Y = np.c_[y, 1. + y] ridge = Ridge() ridge.fit(X, y) intercept = ridge.intercept_ ridge.fit(X, Y) assert_almost_equal(ridge.intercept_[0], intercept) assert_almost_equal(ridge.intercept_[1], intercept + 1.) 
def test_toy_ridge_object(): # Test BayesianRegression ridge classifier # TODO: test also n_samples > n_features X = np.array([[1], [2]]) Y = np.array([1, 2]) clf = Ridge(alpha=0.0) clf.fit(X, Y) X_test = [[1], [2], [3], [4]] assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4]) assert_equal(len(clf.coef_.shape), 1) assert_equal(type(clf.intercept_), np.float64) Y = np.vstack((Y, Y)).T clf.fit(X, Y) X_test = [[1], [2], [3], [4]] assert_equal(len(clf.coef_.shape), 2) assert_equal(type(clf.intercept_), np.ndarray) def test_ridge_vs_lstsq(): # On alpha=0., Ridge and OLS yield the same solution. rng = np.random.RandomState(0) # we need more samples than features n_samples, n_features = 5, 4 y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) ridge = Ridge(alpha=0., fit_intercept=False) ols = LinearRegression(fit_intercept=False) ridge.fit(X, y) ols.fit(X, y) assert_almost_equal(ridge.coef_, ols.coef_) ridge.fit(X, y) ols.fit(X, y) assert_almost_equal(ridge.coef_, ols.coef_) def test_ridge_individual_penalties(): # Tests the ridge object using individual penalties rng = np.random.RandomState(42) n_samples, n_features, n_targets = 20, 10, 5 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples, n_targets) penalties = np.arange(n_targets) coef_cholesky = np.array([ Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_ for alpha, target in zip(penalties, y.T)]) coefs_indiv_pen = [ Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_ for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']] for coef_indiv_pen in coefs_indiv_pen: assert_array_almost_equal(coef_cholesky, coef_indiv_pen) # Test error is raised when number of targets and penalties do not match. ridge = Ridge(alpha=penalties[:3]) assert_raises(ValueError, ridge.fit, X, y) def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] ret = [] ridge_gcv = _RidgeGCV(fit_intercept=False) ridge = Ridge(alpha=1.0, fit_intercept=False) # generalized cross-validation (efficient leave-one-out) decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes) errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp) values, c = ridge_gcv._values(1.0, y_diabetes, *decomp) # brute-force leave-one-out: remove one example at a time errors2 = [] values2 = [] for i in range(n_samples): sel = np.arange(n_samples) != i X_new = X_diabetes[sel] y_new = y_diabetes[sel] ridge.fit(X_new, y_new) value = ridge.predict([X_diabetes[i]])[0] error = (y_diabetes[i] - value) ** 2 errors2.append(error) values2.append(value) # check that efficient and brute-force LOO give same results assert_almost_equal(errors, errors2) assert_almost_equal(values, values2) # generalized cross-validation (efficient leave-one-out, # SVD variation) decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes) errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp) values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results assert_almost_equal(errors, errors3) assert_almost_equal(values, values3) # check best alpha ridge_gcv.fit(filter_(X_diabetes), y_diabetes) alpha_ = ridge_gcv.alpha_ ret.append(alpha_) # check that we get same best alpha with custom loss_func f = ignore_warnings scoring = make_scorer(mean_squared_error, greater_is_better=False) ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv2.alpha_, alpha_) # check that we get same best alpha 
with custom score_func func = lambda x, y: -mean_squared_error(x, y) scoring = make_scorer(func) ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring) f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv3.alpha_, alpha_) # check that we get same best alpha with a scorer scorer = get_scorer('mean_squared_error') ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer) ridge_gcv4.fit(filter_(X_diabetes), y_diabetes) assert_equal(ridge_gcv4.alpha_, alpha_) # check that we get same best alpha with sample weights ridge_gcv.fit(filter_(X_diabetes), y_diabetes, sample_weight=np.ones(n_samples)) assert_equal(ridge_gcv.alpha_, alpha_) # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T ridge_gcv.fit(filter_(X_diabetes), Y) Y_pred = ridge_gcv.predict(filter_(X_diabetes)) ridge_gcv.fit(filter_(X_diabetes), y_diabetes) y_pred = ridge_gcv.predict(filter_(X_diabetes)) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=5) return ret def _test_ridge_cv(filter_): n_samples = X_diabetes.shape[0] ridge_cv = RidgeCV() ridge_cv.fit(filter_(X_diabetes), y_diabetes) ridge_cv.predict(filter_(X_diabetes)) assert_equal(len(ridge_cv.coef_.shape), 1) assert_equal(type(ridge_cv.intercept_), np.float64) cv = KFold(n_samples, 5) ridge_cv.set_params(cv=cv) ridge_cv.fit(filter_(X_diabetes), y_diabetes) ridge_cv.predict(filter_(X_diabetes)) assert_equal(len(ridge_cv.coef_.shape), 1) assert_equal(type(ridge_cv.intercept_), np.float64) def _test_ridge_diabetes(filter_): ridge = Ridge(fit_intercept=False) ridge.fit(filter_(X_diabetes), y_diabetes) return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5) def _test_multi_ridge_diabetes(filter_): # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T n_features = X_diabetes.shape[1] ridge = Ridge(fit_intercept=False) ridge.fit(filter_(X_diabetes), Y) assert_equal(ridge.coef_.shape, (2, n_features)) Y_pred = ridge.predict(filter_(X_diabetes)) ridge.fit(filter_(X_diabetes), y_diabetes) y_pred = ridge.predict(filter_(X_diabetes)) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def _test_ridge_classifiers(filter_): n_classes = np.unique(y_iris).shape[0] n_features = X_iris.shape[1] for clf in (RidgeClassifier(), RidgeClassifierCV()): clf.fit(filter_(X_iris), y_iris) assert_equal(clf.coef_.shape, (n_classes, n_features)) y_pred = clf.predict(filter_(X_iris)) assert_greater(np.mean(y_iris == y_pred), .79) n_samples = X_iris.shape[0] cv = KFold(n_samples, 5) clf = RidgeClassifierCV(cv=cv) clf.fit(filter_(X_iris), y_iris) y_pred = clf.predict(filter_(X_iris)) assert_true(np.mean(y_iris == y_pred) >= 0.8) def _test_tolerance(filter_): ridge = Ridge(tol=1e-5) ridge.fit(filter_(X_diabetes), y_diabetes) score = ridge.score(filter_(X_diabetes), y_diabetes) ridge2 = Ridge(tol=1e-3) ridge2.fit(filter_(X_diabetes), y_diabetes) score2 = ridge2.score(filter_(X_diabetes), y_diabetes) assert_true(score >= score2) def test_dense_sparse(): for test_func in (_test_ridge_loo, _test_ridge_cv, _test_ridge_diabetes, _test_multi_ridge_diabetes, _test_ridge_classifiers, _test_tolerance): # test dense matrix ret_dense = test_func(DENSE_FILTER) # test sparse matrix ret_sparse = test_func(SPARSE_FILTER) # test that the outputs are the same if ret_dense is not None and ret_sparse is not None: assert_array_almost_equal(ret_dense, ret_sparse, decimal=3) def test_ridge_cv_sparse_svd(): X = sp.csr_matrix(X_diabetes) ridge = RidgeCV(gcv_mode="svd") assert_raises(TypeError, ridge.fit, X) def 
test_ridge_sparse_svd(): X = sp.csc_matrix(rng.rand(100, 10)) y = rng.rand(100) ridge = Ridge(solver='svd') assert_raises(TypeError, ridge.fit, X, y) def test_class_weights(): # Test class weights. X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = RidgeClassifier(class_weight=None) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = RidgeClassifier(class_weight={1: 0.001}) clf.fit(X, y) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) # check if class_weight = 'auto' can handle negative labels. clf = RidgeClassifier(class_weight='auto') clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # class_weight = 'auto', and class_weight = None should return # same values when y has equal number of all labels X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]]) y = [1, 1, -1, -1] clf = RidgeClassifier(class_weight=None) clf.fit(X, y) clfa = RidgeClassifier(class_weight='auto') clfa.fit(X, y) assert_equal(len(clfa.classes_), 2) assert_array_almost_equal(clf.coef_, clfa.coef_) assert_array_almost_equal(clf.intercept_, clfa.intercept_) def test_class_weights_cv(): # Test class weights for cross validated ridge classifier. X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1]) clf.fit(X, y) # we give a small weights to class 1 clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10]) clf.fit(X, y) assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1])) def test_ridgecv_store_cv_values(): # Test _RidgeCV's store_cv_values attribute. rng = rng = np.random.RandomState(42) n_samples = 8 n_features = 5 x = rng.randn(n_samples, n_features) alphas = [1e-1, 1e0, 1e1] n_alphas = len(alphas) r = RidgeCV(alphas=alphas, store_cv_values=True) # with len(y.shape) == 1 y = rng.randn(n_samples) r.fit(x, y) assert_equal(r.cv_values_.shape, (n_samples, n_alphas)) # with len(y.shape) == 2 n_responses = 3 y = rng.randn(n_samples, n_responses) r.fit(x, y) assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas)) def test_raises_value_error_if_sample_weights_greater_than_1d(): # Sample weights must be either scalar or 1D n_sampless = [2, 3] n_featuress = [3, 2] rng = np.random.RandomState(42) for n_samples, n_features in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) sample_weights_OK = rng.randn(n_samples) ** 2 + 1 sample_weights_OK_1 = 1. sample_weights_OK_2 = 2. 
sample_weights_not_OK = sample_weights_OK[:, np.newaxis] sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :] ridge = Ridge(alpha=1) # make sure the "OK" sample weights actually work ridge.fit(X, y, sample_weights_OK) ridge.fit(X, y, sample_weights_OK_1) ridge.fit(X, y, sample_weights_OK_2) def fit_ridge_not_ok(): ridge.fit(X, y, sample_weights_not_OK) def fit_ridge_not_ok_2(): ridge.fit(X, y, sample_weights_not_OK_2) assert_raise_message(ValueError, "Sample weights must be 1D array or scalar", fit_ridge_not_ok) assert_raise_message(ValueError, "Sample weights must be 1D array or scalar", fit_ridge_not_ok_2) def test_sparse_design_with_sample_weights(): # Sample weights must work with sparse matrices n_sampless = [2, 3] n_featuress = [3, 2] rng = np.random.RandomState(42) sparse_matrix_converters = [sp.coo_matrix, sp.csr_matrix, sp.csc_matrix, sp.lil_matrix, sp.dok_matrix ] sparse_ridge = Ridge(alpha=1., fit_intercept=False) dense_ridge = Ridge(alpha=1., fit_intercept=False) for n_samples, n_features in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) sample_weights = rng.randn(n_samples) ** 2 + 1 for sparse_converter in sparse_matrix_converters: X_sparse = sparse_converter(X) sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights) dense_ridge.fit(X, y, sample_weight=sample_weights) assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_, decimal=6) def test_raises_value_error_if_solver_not_supported(): # Tests whether a ValueError is raised if a non-identified solver # is passed to ridge_regression wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)" exception = ValueError message = "Solver %s not understood" % wrong_solver def func(): X = np.eye(3) y = np.ones(3) ridge_regression(X, y, alpha=1., solver=wrong_solver) assert_raise_message(exception, message, func) def test_sparse_cg_max_iter(): reg = Ridge(solver="sparse_cg", max_iter=1) reg.fit(X_diabetes, y_diabetes) assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
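A standalone, hedged sketch of the property exercised by test_ridge_sample_weights above: for the squared loss, per-sample weights are equivalent to rescaling the rows of X and y by sqrt(weight). It assumes scikit-learn's ridge_regression is importable as shown.

import numpy as np
from sklearn.linear_model import ridge_regression

rng = np.random.RandomState(0)
X, y = rng.randn(6, 5), rng.randn(6)
w = 1 + rng.rand(6)

coef_weighted = ridge_regression(X, y, alpha=1.0, sample_weight=w, solver='cholesky')
coef_rescaled = ridge_regression(X * np.sqrt(w)[:, np.newaxis], y * np.sqrt(w),
                                 alpha=1.0, solver='cholesky')
print(np.allclose(coef_weighted, coef_rescaled))   # True up to numerical error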
bsd-3-clause
siavooshpayandehazad/SoCDep2
src/main/python/SystemHealthMonitoring/FaultClassifier/ML.py
2
6439
# Copyright (C) 2015 Rene Pihlak # import numpy as np import collections import matplotlib.pyplot as plt from sklearn import svm from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier import matplotlib.animation as animation def updatefig(*args): global index, new_data, in_data, dmb_bad, dmb_good, classifierSVM, classifierKNN, classifierDTree, linesvm, \ axsvm, axknn, axdtr, axdmb, lineknn, linedtr, linedmb, learn_labels_names arr = [] if index < len(in_data) - 5: dmb = "good" new_data.append(in_data[index]) if(in_data[index] == 1): dmb_bad.append(1) else: dmb_good.append(1) if(sum(dmb_good) == 5): for i in range(0, 5): dmb_good.append(0) dmb_bad.append(0) # elif dmb_bad[4] == 1: # dmb = "bad" # for i in range(0, 5): # dmb_good.append(0) if sum(dmb_bad) == 5: dmb = "bad" predictedSVM = classifierSVM.predict(new_data) predictedKNN = classifierKNN.predict(new_data) predictedDTree = classifierDTree.predict(new_data) for i in range(0, len(new_data)): arr.append(new_data[i]) linesvm.set_ydata(arr) lineknn.set_ydata(arr) linedtr.set_ydata(arr) linedmb.set_ydata(arr) linesvm.set_xdata(range(index, index + 5)) lineknn.set_xdata(range(index, index + 5)) linedtr.set_xdata(range(index, index + 5)) linedmb.set_xdata(range(index, index + 5)) axsvm.set_ylim(-0.2, 1.3) axknn.set_ylim(-0.2, 1.3) axdtr.set_ylim(-0.2, 1.3) axdmb.set_ylim(-0.2, 1.3) axsvm.set_xlim(index - 0.5, index + len(arr) - 0.5) axknn.set_xlim(index - 0.5, index + len(arr) - 0.5) axdtr.set_xlim(index - 0.5, index + len(arr) - 0.5) axdmb.set_xlim(index - 0.5, index + len(arr) - 0.5) # print(learn_labels_names[predictedSVM[0]]) axsvm.set_title("SVM: %s" % learn_labels_names[predictedSVM[0]]) axknn.set_title("KNN: %s" % learn_labels_names[predictedKNN[0]]) axdtr.set_title("DTree: %s" % learn_labels_names[predictedDTree[0]]) axdmb.set_title("DMB: Good - %d, Bad - %d, Total: %s" % (sum(dmb_good), sum(dmb_bad), dmb)) axsvm.figure.canvas.draw() axknn.figure.canvas.draw() axdtr.figure.canvas.draw() axdmb.figure.canvas.draw() index += 1 return linesvm, lineknn, linedtr, linedmb, def ML(): figure = plt.figure(figsize=(27, 9)) # fig, ax = plt.subplots() # line, = ax.step([], [], lw=2) # ax.grid() # xdata, ydata = [], [] axsvm = plt.subplot(2, 4, 5) linesvm, = axsvm.step([], [], lw=2) axsvm.grid() xdatasvm, ydatasvm = [], [] axknn = plt.subplot(2, 4, 6) lineknn, = axknn.step([], [], lw=2) axknn.grid() xdataknn, ydataknn = [], [] axdtr = plt.subplot(2, 4, 7) linedtr, = axdtr.step([], [], lw=2) axdtr.grid() xdatadtr, ydatadtr = [], [] axdmb = plt.subplot(2, 4, 8) linedmb, = axdmb.step([], [], lw=2) axdmb.grid() xdatadmb, ydatadmb = [], [] # MUST HAVE EQUAL NUMBER OF ALL TYPES, # i.e., if n classified groups, then learning set size = n x m learn_data = [ [1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 1, 1, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [1, 0, 0, 1, 1], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]] learn_labels = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] learn_labels_names = ["dead", "dying", "ok", "perfect", "dead", "dying", "ok", "perfect", "dead", "dying", "ok", "perfect", "dead", "dying", "ok", "perfect", "dead", "dying", "ok", "perfect"] print("LEARNING DATA:") for index in range(0, len(learn_data)): print("%s: %d :: %s" % (learn_data[index], learn_labels[index], learn_labels_names[index])) 
if(index < 4): ax1 = plt.subplot(2, 4, index + 1) line1, = ax1.step([], [], lw=2) ax1.grid() line1.set_ydata(learn_data[index]) line1.set_xdata(range(0, 5)) ax1.set_ylim(-0.2, 1.3) ax1.set_xlim(-0.5, 4.5) ax1.set_title("Training: %s" % learn_labels_names[index]) ax1.figure.canvas.draw() # plt.step(range(0,5),learn_data[10], where='pre') # plt.xlim(-0.5, len(learn_data[10])) # plt.ylim(-0.2, 1.3) # plt.title("Learned: %s" % (learn_labels_names[10])) # plt.draw() in_data = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1] new_data = collections.deque(maxlen=5) dmb_good = collections.deque(maxlen=5) dmb_bad = collections.deque(maxlen=5) for i in range(0, 5): new_data.append(0) dmb_bad.append(0) dmb_good.append(0) index = 0 # ax.set_ylim(-1.1, 1.1) # ax.set_xlim(0, 10) # del xdata[:] # del ydata[:] # line.set_data(xdata, ydata) # Create a classifier: a support vector classifier classifierSVM = svm.SVC(gamma=0.001) classifierKNN = KNeighborsClassifier(4) classifierDTree = DecisionTreeClassifier(max_depth=5) # We learn the digits on the first half of the digits classifierSVM.fit(learn_data, learn_labels) classifierKNN.fit(learn_data, learn_labels) classifierDTree.fit(learn_data, learn_labels) # predicted = classifier.predict(data) movis = animation.writers['avconv'] movi = movis(fps=1, bitrate=1800) ani = animation.FuncAnimation(figure, updatefig, interval=500, blit=False) ani.save('../GraphDrawings/mlmultipletest.mp4', writer=movi, fps=1, bitrate=1800) # print("ANALYSED DATA:") # for index in range(0, len(data)): # print("%s: %s" % (data[index], learn_labels_names[predicted[index]])) plt.show()
gpl-2.0
woobe/h2o
py/testdir_multi_jvm/test_rf_mnist_fvec.py
1
6526
import unittest import random, sys, time, re sys.path.extend(['.','..','py']) import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs DO_POLL = False class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): # assume we're at 0xdata with it's hdfs namenode global localhost localhost = h2o.decide_if_localhost() if (localhost): h2o.build_cloud(1, java_heap_GB=28) else: h2o_hosts.build_cloud_with_hosts() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_rf_mnist_fvec(self): h2o.beta_features = True importFolderPath = "mnist" csvFilelist = [ # ("mnist_testing.csv.gz", "mnist_testing.csv.gz", 600), # ("a.csv", "b.csv", 60), # ("mnist_testing.csv.gz", "mnist_testing.csv.gz", 600), ("mnist_training.csv.gz", "mnist_testing.csv.gz", 600), ] trial = 0 for (trainCsvFilename, testCsvFilename, timeoutSecs) in csvFilelist: trialStart = time.time() # PARSE test**************************************** testKey2 = testCsvFilename + "_" + str(trial) + ".hex" start = time.time() parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath + "/" + testCsvFilename, hex_key=testKey2, timeoutSecs=timeoutSecs) elapsed = time.time() - start print "parse end on ", testCsvFilename, 'took', elapsed, 'seconds',\ "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) print "parse result:", parseResult['destination_key'] print "We won't use this pruning of x on test data. See if it prunes the same as the training" y = 0 # first column is pixel value print "y:" # x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300) # PARSE train**************************************** trainKey2 = trainCsvFilename + "_" + str(trial) + ".hex" start = time.time() parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath + "/" + trainCsvFilename, schema='local', hex_key=trainKey2, timeoutSecs=timeoutSecs) elapsed = time.time() - start print "parse end on ", trainCsvFilename, 'took', elapsed, 'seconds',\ "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) print "parse result:", parseResult['destination_key'] # RF+RFView (train)**************************************** print "This is the 'ignore=' we'll use" ignore_x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300, forRF=True) params = { 'response': 'C' + str(y+1), 'cols': None, 'ignored_cols_by_name': ignore_x, 'classification': 1, 'validation': None, 'ntrees': 2, 'max_depth': 20, 'min_rows': None, 'nbins': 1000, 'mtries': None, 'sample_rate': 0.66, 'seed': None, } rfViewInitial = [] for jobDispatch in range(1): # adjust timeoutSecs with the number of trees # seems ec2 can be really slow params['destination_key'] = 'RFModel_' + str('jobDispatch') kwargs = params.copy() timeoutSecs = 1200 start = time.time() rfResult = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, noPoll=not DO_POLL, rfView=DO_POLL, **kwargs) elapsed = time.time() - start # print h2o.dump_json(rfResult) print "rf job dispatch end on ", trainCsvFilename, 'took', time.time() - start, 'seconds' print "\njobDispatch #", jobDispatch # FIX! are these already in there? rfView = {} rfView['data_key'] = trainKey2 rfView['model_key'] = kwargs['destination_key'] rfView['ntrees'] = kwargs['ntrees'] rfViewInitial.append(rfView) if not DO_POLL: h2o_jobs.pollStatsWhileBusy(timeoutSecs=1200, pollTimeoutSecs=120, retryDelaySecs=5) # FIX! 
need to add the rfview and predict stuff # we saved the initial response? # if we do another poll they should be done now, and better to get it that # way rather than the inspect (to match what simpleCheckGLM is expected print "rfViewInitial", rfViewInitial for rfView in rfViewInitial: print "Checking completed job:", rfView print "rfView", h2o.dump_json(rfView) data_key = rfView['data_key'] model_key = rfView['model_key'] ntrees = rfView['ntrees'] rfView = h2o_cmd.runRFView(None, model_key=model_key, timeoutSecs=60, noPoll=not DO_POLL, doSimpleCheck=False) (classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView) self.assertAlmostEqual(classification_error, 20, delta=2, msg="Classification error %s differs too much" % classification_error) if not DO_POLL: h2o_jobs.pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=120, retryDelaySecs=5) # rfView = h2o_cmd.runRFView(None, data_key, model_key, timeoutSecs=60, noPoll=True, doSimpleCheck=False) # print "rfView:", h2o.dump_json(rfView) # "N":1, # "errs":[0.25,0.1682814508676529], # "testKey":"syn_binary_10000x10.hex", # "cm":[[3621,1399],[1515,3465]]}} rf_model = rfView['drf_model'] cms = rf_model['cms'] errs = rf_model['errs'] # FIX! should update this expected classification error ## (classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=ntrees) ## self.assertAlmostEqual(classification_error, 0.03, delta=0.5, msg="Classification error %s differs too much" % classification_error) predict = h2o.nodes[0].generate_predictions(model_key=model_key, data_key=data_key) if __name__ == '__main__': h2o.unit_main()
apache-2.0
otadmor/Open-Knesset
laws/tags/build_important_keyword_html.py
1
8719
#!/usr/bin/python # -*- coding: UTF-8 -*- import numpy import numpy as np import re from functools import partial from sklearn.externals import joblib import cPickle as pickle from collections import defaultdict lb = pickle.load(open("classifier_data/label_binarizer.pkl", "rb")) keywords, data = pickle.load(open('histograms_with_tags.pkl', 'rb')) pp_with_tags = pickle.load(open('pp_with_tags.pkl', "rb")) trained_classifier = joblib.load('classifier_data/linear_svc_classifier.jlb') keywords = numpy.array(sorted(list(keywords))) coefs = np.vstack([e.steps[-1][1].coef_ for e in trained_classifier.estimators_]) import os import codecs try: os.makedirs("important_keywords/tags") except OSError, e: pass try: os.makedirs("important_keywords/keywords") except OSError, e: pass doc_in_class = defaultdict(list) print "creating tag dicts" for a in pp_with_tags: for tag in a['tags']: doc_in_class[tag].append(a) print "creating keyword dict" keyword_dict = {} for i,w in enumerate(keywords): keyword_dict[w] = i print "splitting docs" pp_with_tags = pickle.load(open('pp_with_tags.pkl', "rb")) pp_dict = {} for pp in pp_with_tags: pp_dict[pp['id']] = pp['text'] def output_tag(class_file, w, estimator_max, estimator_min, weight, hyperlink = False): orig_weight = weight pre = post = '' if weight > 0 and estimator_max != 0: weight /= estimator_max if weight > 0.05 and hyperlink: pre = u"<a href='../tags/" + w.replace(u'/', u' ') + u".html' title='" + str(orig_weight) + u"'>" post = u"</a>" print >>class_file, pre + u"<span style='color:#00%02X00;'>" % (int(255 * weight),) + w + u"</span>" + post + "&nbsp;" elif weight < 0 and estimator_min != 0: weight /= estimator_min if weight > 0.05 and hyperlink: pre = u"<a href='../tags/" + w.replace(u'/', u' ') + u".html' title='" + str(orig_weight) + u"'>" post = u"</a>" print >>class_file, pre + u"<span style='color:#%02X0000;'>" % (int(255 * weight),) + w + u"</span>" + post + "&nbsp;" else: print >>class_file, u"<span style='color:#000000;'>" + w + u"</span>&nbsp;" def output_word(class_file, w, estimator_max, estimator_min, coefs, keyword_dict, hyperlink = False): weight = coefs[keyword_dict[w]] pre = post = '' if weight > 0 and estimator_max != 0: weight /= estimator_max if weight > 0.05 and hyperlink: pre = u"<a href='../keywords/" + w.replace(u'/', u' ') + u".html' title='" + str(coefs[keyword_dict[w]]) + u"'>" post = u"</a>" print >>class_file, pre + u"<span style='color:#00%02X00;'>" % (int(255 * weight),) + w + u"</span>" + post + "&nbsp;" elif weight < 0 and estimator_min != 0: weight /= estimator_min if weight > 0.05 and hyperlink: pre = u"<a href='../keywords/" + w.replace(u'/', u' ') + u".html' title='" + str(coefs[keyword_dict[w]]) + u"'>" post = u"</a>" print >>class_file, pre + u"<span style='color:#%02X0000;'>" % (int(255 * weight),) + w + u"</span>" + post + "&nbsp;" else: print >>class_file, u"<span style='color:#000000;'>" + w + u"</span>&nbsp;" def format_word(w, estimator_max, estimator_min, coefs, keyword_dict, hyperlink = False): orig_Weight = weight = coefs[keyword_dict[w]] pre = post = '' if weight > 0 and estimator_max != 0: weight /= estimator_max if hyperlink: pre = u"<a href='../keywords/" + w.replace(u'/', u' ') + u".html' title='" + str(orig_Weight) + u"'>" post = u"</a>" v = u"<span style='color:#00%02X00;'>" % (int(255 * weight),) + w + u"</span>" elif weight < 0 and estimator_min != 0: weight /= estimator_min if hyperlink: pre = u"<a href='../keywords/" + w.replace(u'/', u' ') + u".html' title='" + str(orig_Weight) + u"'>" post = 
u"</a>" v = u"<span style='color:#%02X0000;'>" % (int(255 * weight),) + w + u"</span>" else: v = u"<span style='color:#000000;'>" + w + u"</span>" return u"%s%s%s" % (pre, v, post,) ATTR_AMOUNT = 10 important_keywords = keywords[numpy.abs(coefs).argsort()[:,-ATTR_AMOUNT:]] MAX_RE_GROUP = 100 def create_re(estimator_max, estimator_min, coefs, keyword_dict): #return ("|".join(p) for p in zip(*( # ( # u"(?P<pre_%d>\\b)%s(?P<post_%d>\\b)" % (i, re.escape(k), i, ), # u"\\g<pre_%d>%s\\g<post_%d>" % (i, re.escape(format_word(k, estimator_max, estimator_min, estimator, keyword_dict, hyperlink = True)), i, ), # ) # for i, k # in enumerate(keywords[numpy.where(estimator.coef_[0] != 0)]) # ) #)) return { k : format_word(k, estimator_max, estimator_min, coefs, keyword_dict, hyperlink = True) for k in keywords[numpy.where(coefs != 0)] } ALLOWED_PRELETTERS = 3 MIN_WORD_LEN = 2 def replace_match(replacements, match): whole_match_str = match_str = match.group(0) if match_str in replacements: return replacements[match_str] for i in xrange(1, min(ALLOWED_PRELETTERS + 1, len(match_str) - MIN_WORD_LEN + 1)): pre, pre_match_str = match_str[:i], match_str[i:] if pre_match_str in replacements: return pre + replacements[pre_match_str] return u"[%s]" % (match.group(0),) print "creating files" for i, (class_name, important_keyword_for_class, estimator) in enumerate(zip(lb.classes_, important_keywords, trained_classifier.estimators_)): print "tag", str(i), "/", str(len(lb.classes_)) class_file = codecs.open("important_keywords/tags/" + class_name.replace("/", ' ') + ".html", "wt", encoding="utf-8") print >>class_file, u"<html dir='rtl'><head><title>Keyword Explanation for Class " + class_name + u"</title></head><body><meta http-equiv='Content-Type' content='text/html;charset=UTF-8'>" print >>class_file, u"<h1>" + class_name + u"</h1><br/>" print >>class_file, u"<h2>Most influencing words</h2>" estimator_min, estimator_max = coefs[i,:].min(), coefs[i,:].max() for keyword in important_keyword_for_class: output_word(class_file, keyword, estimator_max, estimator_min, coefs[i], keyword_dict) print >>class_file, u"(", str(coefs[i,keyword_dict[keyword]]), u")<br/>" # tag_pattern, tag_replacement_pattern = create_re(estimator_max, estimator_min, estimator, keyword_dict) # tag_re = re.compile(tag_pattern, re.MULTILINE | re.UNICODE) replacements = create_re(estimator_max, estimator_min, coefs[i], keyword_dict) replacements_re = re.compile('|'.join(('|'.join((u'\\b%s%s\\b' % (u'\\w' * i, p,) for p in replacements.iterkeys())) for i in xrange(ALLOWED_PRELETTERS + 1))), re.UNICODE) print >>class_file, u"<table border='1'>" for doc in doc_in_class[class_name]: print >>class_file, u"<tr><td valign='top'>" + unicode(doc['id']) + u"</td><td valign='top'>" print >>class_file, replacements_re.sub(partial(replace_match, replacements), pp_dict[doc['id']]) #import pdb; pdb.set_trace() # print >>class_file, reduce(lambda s,(o,r): re.sub(o,r,s, flags=re.UNICODE), replacements, pp_dict[doc['id']]) # print >>class_file, tag_re.sub(tag_replacement_pattern, pp_dict[doc['id']]) print >>class_file, u"</td></tr>" print >>class_file, u"</table></body></html>" class_file.close() coef_abs = numpy.abs(coefs) sorted_keywords = coef_abs.argsort(axis = 0) min_weight, max_weight = coefs.min(axis = 1), coefs.max(axis = 1) for keyword_count, i in enumerate(coef_abs.sum(axis = 0).argsort()[::-1]): keyword = keywords[i] print "keyword", str(keyword_count), "/", str(len(keywords)), "(", str(i), ")" keyword_file = codecs.open("important_keywords/keywords/" + 
keyword.replace("/", ' ') + ".html", "wt", encoding="utf-8") print >>keyword_file, u"<html dir='rtl'><head><title>Tag Explanation for Keyword " + keyword + u"</title></head><body><meta http-equiv='Content-Type' content='text/html;charset=UTF-8'>" print >>keyword_file, u"<h1>" + keyword + u"</h1><br/>" print >>keyword_file, u"<h2>Most influenced tags</h2>" weights = coefs[:,i] class_keywords = sorted_keywords[::-1,i] for class_name, weight, max_weight_, min_weight_ in zip(lb.classes_[class_keywords], weights[class_keywords], max_weight[class_keywords], min_weight[class_keywords]): if weight != 0: output_tag(keyword_file, class_name, max_weight_, min_weight_, weight, hyperlink = True) print >>keyword_file, u"<br/>" print >>keyword_file, u"</body></html>" keyword_file.close()
bsd-3-clause
RecipeML/Recipe
recipe/preprocessors/percentile.py
1
1328
# -*- coding: utf-8 -*-

"""
Copyright 2016 Walter José and Alex de Sá

This file is part of the RECIPE Algorithm.

The RECIPE is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
See http://www.gnu.org/licenses/.
"""

from sklearn.feature_selection import f_classif, chi2, SelectPercentile


def percentile(args):
    """Uses scikit-learn's SelectPercentile to select features according to a
    percentile of the highest scores.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.
    """

    if (args[2] == "chi2"):
        selector = SelectPercentile(chi2, percentile=int(args[1]))
    elif (args[2] == "f_classif"):
        selector = SelectPercentile(f_classif, percentile=int(args[1]))

    return selector
gpl-3.0
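
A minimal usage sketch for the record above (not part of the RECIPE source): it shows how a selector built this way is typically applied. The literal percentile and score-function values are hypothetical stand-ins for whatever RECIPE actually passes in as args.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectPercentile, chi2

X, y = load_iris(return_X_y=True)

# Keep the top 25% of features ranked by the chi2 score (chi2 needs non-negative features).
selector = SelectPercentile(chi2, percentile=25)
X_reduced = selector.fit_transform(X, y)
print(X.shape, "->", X_reduced.shape)  # (150, 4) -> (150, 1)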
jmargeta/scikit-learn
examples/svm/plot_custom_kernel.py
4
1524
""" ====================== SVM with custom kernel ====================== Simple usage of Support Vector Machines to classify a sample. It will plot the decision surface and the support vectors. """ print(__doc__) import numpy as np import pylab as pl from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset Y = iris.target def my_kernel(x, y): """ We create a custom kernel: (2 0) k(x, y) = x ( ) y.T (0 1) """ M = np.array([[2, 0], [0, 1.0]]) return np.dot(np.dot(x, M), y.T) h = .02 # step size in the mesh # we create an instance of SVM and fit out data. clf = svm.SVC(kernel=my_kernel) clf.fit(X, Y) # Plot the decision boundary. For that, we will asign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired) # Plot also the training points pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired) pl.title('3-Class classification using Support Vector Machine with custom' ' kernel') pl.axis('tight') pl.show()
bsd-3-clause
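
A short sketch, separate from the dataset record above, checking the algebra behind that custom kernel: k(x, y) = x·M·yᵀ with M = diag(2, 1) is just a linear kernel applied after scaling the first feature by √2.

import numpy as np

def my_kernel(x, y):
    # Same Gram-matrix function as in the example above.
    M = np.array([[2, 0], [0, 1.0]])
    return np.dot(np.dot(x, M), y.T)

rng = np.random.RandomState(0)
A, B = rng.randn(4, 2), rng.randn(3, 2)

scale = np.sqrt([2.0, 1.0])  # scale the first feature by sqrt(2), leave the second alone
assert np.allclose(my_kernel(A, B), np.dot(A * scale, (B * scale).T))
print(my_kernel(A, B).shape)  # (4, 3): one kernel value per pair of samples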
fizz-ml/policybandit
trainer.py
1
3859
import torch as t
from torch.autograd import Variable as V
from torch import FloatTensor as FT
import numpy as np
from bayestorch.hmc import HMCSampler


class SimpleTrainer:
    def __init__(self, env, critic, hallucinator, policy_buffer, policy_c, noise_dim):
        self.env = env
        self.hallucinator = hallucinator
        self.critic = critic
        self.policy_buffer = policy_buffer
        self.policy_c = policy_c
        self.noise_dim = noise_dim

    def train(self, train_steps, sample_steps, opt_steps):
        in_dim = self.env.obs_size
        out_dim = self.env.action_size
        cur_policy = self.policy_c(in_dim, out_dim)
        for i in range(train_steps):
            reward = self.sample_episode(cur_policy)
            self.policy_buffer.put(cur_policy.state_dict(), reward)
            self.train_critic_hallucinator(sample_steps)
            self.train_policy(opt_steps)

    def sample_episode(self, policy, n=1, skip=3):
        done = False
        total_reward = 0
        for i in range(n):
            cur_obs = self.env.new_episode()
            t = 0
            while not done:
                cur_obs = V(FT(cur_obs)).unsqueeze(0)
                display = (t % skip == 0)
                cur_action = policy.forward(cur_obs).data.cpu().numpy()
                cur_obs, cur_reward, done = self.env.next_obs(cur_action.squeeze(0), render=display)
                total_reward += cur_reward
                t += 1
        avg_episode_reward = total_reward / n
        return avg_episode_reward

    def train_critic_hallucinator(self, sample_steps):
        def closure_gen():
            yield (lambda: self.critic.get_prior_llh())
            for state_dict, reward in self.policy_buffer:
                policy = self.policy_c(self.env.obs_size, self.env.action_size)
                policy.load_state_dict(state_dict)

                def closure():
                    noise = V(FT(np.random.randn(self.noise_dim)))
                    states = self.hallucinator.forward(noise.unsqueeze(0))
                    # Concatenate the batch dimension (currently 1) and the
                    # dimension of the hallucinated states.
                    states = states.view(states.size(0) * self.hallucinator.n, -1)
                    actions = policy.forward(states)
                    actions = actions.view(1, -1)
                    states = states.view(1, -1)
                    mean = self.critic(states, actions)[0]
                    lsd = self.critic(states, actions)[0]
                    llh = gaussian_llh(mean, lsd, reward)
                    return llh

                yield closure

        params = self.critic.parameter_list() \
            + self.hallucinator.parameter_list()
        sampler = HMCSampler(params)
        for i in range(sample_steps):
            sampler.step(closure_gen)

    def train_policy(self, opt_steps):
        state_dict, _ = self.policy_buffer.peek()
        policy = self.policy_c(self.env.obs_size, self.env.action_size)
        policy.load_state_dict(state_dict)
        opt = t.optim.SGD(policy.parameters(), lr=0.001)

        # TODO: share a single closure with train_critic_hallucinator instead
        # of duplicating it here.
        def closure():
            noise = V(FT(np.random.randn(self.noise_dim)))
            states = self.hallucinator.forward(noise.unsqueeze(0))
            # Concatenate the batch dimension (currently 1) and the
            # dimension of the hallucinated states.
            states = states.view(states.size(0) * self.hallucinator.n, -1)
            actions = policy.forward(states)
            actions = actions.view(1, -1)
            states = states.view(1, -1)
            reward = self.critic(states, actions)[0]
            return reward

        for i in range(opt_steps):
            opt.zero_grad()
            opt.step(closure)
        return policy.state_dict()


def gaussian_llh(mean, log_std_dev, reward):
    # Unnormalised Gaussian log-likelihood of the observed reward.
    llh = -(mean - reward) ** 2 - 2 * log_std_dev
    return llh
mit
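
For reference (not from the repository above), the unnormalised Gaussian log-density that a reward critic like this usually maximises can be written as the sketch below; the record's gaussian_llh is a simplified variant of it.

import numpy as np

def gaussian_log_likelihood(mean, log_std, x):
    # log N(x | mean, exp(log_std)^2), dropping the constant -0.5 * log(2 * pi)
    return -0.5 * ((x - mean) / np.exp(log_std)) ** 2 - log_std

print(gaussian_log_likelihood(mean=0.0, log_std=0.0, x=1.0))  # -0.5: one sigma away under a unit Gaussian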
laszlocsomor/tensorflow
tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
122
2703
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Text datasets."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tarfile

import numpy as np

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile

DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'


def maybe_download_dbpedia(data_dir):
  """Download if DBpedia data is not present."""
  train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
  test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
  if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
    archive_path = base.maybe_download(
        'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
    tfile = tarfile.open(archive_path, 'r:*')
    tfile.extractall(data_dir)


def load_dbpedia(size='small', test_with_fake_data=False):
  """Get DBpedia datasets from CSV files."""
  if not test_with_fake_data:
    data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
    maybe_download_dbpedia(data_dir)

    train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
    test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')

    if size == 'small':
      # Reduce the size of original data by a factor of 1000.
      base.shrink_csv(train_path, 1000)
      base.shrink_csv(test_path, 1000)
      train_path = train_path.replace('train.csv', 'train_small.csv')
      test_path = test_path.replace('test.csv', 'test_small.csv')
  else:
    module_path = os.path.dirname(__file__)
    train_path = os.path.join(module_path, 'data', 'text_train.csv')
    test_path = os.path.join(module_path, 'data', 'text_test.csv')

  train = base.load_csv_without_header(
      train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
  test = base.load_csv_without_header(
      test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)

  return base.Datasets(train=train, validation=None, test=test)
apache-2.0
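
A hedged usage sketch for the module above (assumes TensorFlow 1.x with contrib available and network access for the first download; the TF_EXP_BASE_DIR environment variable controls where the data lands):

from tensorflow.contrib.learn.python.learn.datasets import text_datasets

# Downloads and shrinks the DBpedia CSVs on first use, then returns a
# Datasets(train=..., validation=None, test=...) namedtuple of (data, target) pairs.
dbpedia = text_datasets.load_dbpedia(size='small')
print(len(dbpedia.train.data), len(dbpedia.test.data))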
ningchi/scikit-learn
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
226
5170
""" ================================================= Hyper-parameters of Approximate Nearest Neighbors ================================================= This example demonstrates the behaviour of the accuracy of the nearest neighbor queries of Locality Sensitive Hashing Forest as the number of candidates and the number of estimators (trees) vary. In the first plot, accuracy is measured with the number of candidates. Here, the term "number of candidates" refers to maximum bound for the number of distinct points retrieved from each tree to calculate the distances. Nearest neighbors are selected from this pool of candidates. Number of estimators is maintained at three fixed levels (1, 5, 10). In the second plot, the number of candidates is fixed at 50. Number of trees is varied and the accuracy is plotted against those values. To measure the accuracy, the true nearest neighbors are required, therefore :class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact neighbors. """ from __future__ import division print(__doc__) # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # # License: BSD 3 clause ############################################################################### import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt # Initialize size of the database, iterations and required neighbors. n_samples = 10000 n_features = 100 n_queries = 30 rng = np.random.RandomState(42) # Generate sample data X, _ = make_blobs(n_samples=n_samples + n_queries, n_features=n_features, centers=10, random_state=0) X_index = X[:n_samples] X_query = X[n_samples:] # Get exact neighbors nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute', metric='cosine').fit(X_index) neighbors_exact = nbrs.kneighbors(X_query, return_distance=False) # Set `n_candidate` values n_candidates_values = np.linspace(10, 500, 5).astype(np.int) n_estimators_for_candidate_value = [1, 5, 10] n_iter = 10 stds_accuracies = np.zeros((len(n_estimators_for_candidate_value), n_candidates_values.shape[0]), dtype=float) accuracies_c = np.zeros((len(n_estimators_for_candidate_value), n_candidates_values.shape[0]), dtype=float) # LSH Forest is a stochastic index: perform several iteration to estimate # expected accuracy and standard deviation displayed as error bars in # the plots for j, value in enumerate(n_estimators_for_candidate_value): for i, n_candidates in enumerate(n_candidates_values): accuracy_c = [] for seed in range(n_iter): lshf = LSHForest(n_estimators=value, n_candidates=n_candidates, n_neighbors=1, random_state=seed) # Build the LSH Forest index lshf.fit(X_index) # Get neighbors neighbors_approx = lshf.kneighbors(X_query, return_distance=False) accuracy_c.append(np.sum(np.equal(neighbors_approx, neighbors_exact)) / n_queries) stds_accuracies[j, i] = np.std(accuracy_c) accuracies_c[j, i] = np.mean(accuracy_c) # Set `n_estimators` values n_estimators_values = [1, 5, 10, 20, 30, 40, 50] accuracies_trees = np.zeros(len(n_estimators_values), dtype=float) # Calculate average accuracy for each value of `n_estimators` for i, n_estimators in enumerate(n_estimators_values): lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1) # Build the LSH Forest index lshf.fit(X_index) # Get neighbors neighbors_approx = lshf.kneighbors(X_query, return_distance=False) accuracies_trees[i] = np.sum(np.equal(neighbors_approx, neighbors_exact))/n_queries 
############################################################################### # Plot the accuracy variation with `n_candidates` plt.figure() colors = ['c', 'm', 'y'] for i, n_estimators in enumerate(n_estimators_for_candidate_value): label = 'n_estimators = %d ' % n_estimators plt.plot(n_candidates_values, accuracies_c[i, :], 'o-', c=colors[i], label=label) plt.errorbar(n_candidates_values, accuracies_c[i, :], stds_accuracies[i, :], c=colors[i]) plt.legend(loc='upper left', fontsize='small') plt.ylim([0, 1.2]) plt.xlim(min(n_candidates_values), max(n_candidates_values)) plt.ylabel("Accuracy") plt.xlabel("n_candidates") plt.grid(which='both') plt.title("Accuracy variation with n_candidates") # Plot the accuracy variation with `n_estimators` plt.figure() plt.scatter(n_estimators_values, accuracies_trees, c='k') plt.plot(n_estimators_values, accuracies_trees, c='g') plt.ylim([0, 1.2]) plt.xlim(min(n_estimators_values), max(n_estimators_values)) plt.ylabel("Accuracy") plt.xlabel("n_estimators") plt.grid(which='both') plt.title("Accuracy variation with n_estimators") plt.show()
bsd-3-clause
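
A tiny sketch (not part of the example above) of the accuracy metric both plots rely on: the fraction of queries whose approximate nearest neighbour equals the exact one.

import numpy as np

neighbors_exact = np.array([[3], [7], [1], [4]])   # indices returned by brute-force NearestNeighbors
neighbors_approx = np.array([[3], [2], [1], [4]])  # indices returned by the approximate index

accuracy = np.sum(np.equal(neighbors_approx, neighbors_exact)) / neighbors_exact.shape[0]
print(accuracy)  # 0.75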
Fireblend/scikit-learn
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
226
5170
""" ================================================= Hyper-parameters of Approximate Nearest Neighbors ================================================= This example demonstrates the behaviour of the accuracy of the nearest neighbor queries of Locality Sensitive Hashing Forest as the number of candidates and the number of estimators (trees) vary. In the first plot, accuracy is measured with the number of candidates. Here, the term "number of candidates" refers to maximum bound for the number of distinct points retrieved from each tree to calculate the distances. Nearest neighbors are selected from this pool of candidates. Number of estimators is maintained at three fixed levels (1, 5, 10). In the second plot, the number of candidates is fixed at 50. Number of trees is varied and the accuracy is plotted against those values. To measure the accuracy, the true nearest neighbors are required, therefore :class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact neighbors. """ from __future__ import division print(__doc__) # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # # License: BSD 3 clause ############################################################################### import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt # Initialize size of the database, iterations and required neighbors. n_samples = 10000 n_features = 100 n_queries = 30 rng = np.random.RandomState(42) # Generate sample data X, _ = make_blobs(n_samples=n_samples + n_queries, n_features=n_features, centers=10, random_state=0) X_index = X[:n_samples] X_query = X[n_samples:] # Get exact neighbors nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute', metric='cosine').fit(X_index) neighbors_exact = nbrs.kneighbors(X_query, return_distance=False) # Set `n_candidate` values n_candidates_values = np.linspace(10, 500, 5).astype(np.int) n_estimators_for_candidate_value = [1, 5, 10] n_iter = 10 stds_accuracies = np.zeros((len(n_estimators_for_candidate_value), n_candidates_values.shape[0]), dtype=float) accuracies_c = np.zeros((len(n_estimators_for_candidate_value), n_candidates_values.shape[0]), dtype=float) # LSH Forest is a stochastic index: perform several iteration to estimate # expected accuracy and standard deviation displayed as error bars in # the plots for j, value in enumerate(n_estimators_for_candidate_value): for i, n_candidates in enumerate(n_candidates_values): accuracy_c = [] for seed in range(n_iter): lshf = LSHForest(n_estimators=value, n_candidates=n_candidates, n_neighbors=1, random_state=seed) # Build the LSH Forest index lshf.fit(X_index) # Get neighbors neighbors_approx = lshf.kneighbors(X_query, return_distance=False) accuracy_c.append(np.sum(np.equal(neighbors_approx, neighbors_exact)) / n_queries) stds_accuracies[j, i] = np.std(accuracy_c) accuracies_c[j, i] = np.mean(accuracy_c) # Set `n_estimators` values n_estimators_values = [1, 5, 10, 20, 30, 40, 50] accuracies_trees = np.zeros(len(n_estimators_values), dtype=float) # Calculate average accuracy for each value of `n_estimators` for i, n_estimators in enumerate(n_estimators_values): lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1) # Build the LSH Forest index lshf.fit(X_index) # Get neighbors neighbors_approx = lshf.kneighbors(X_query, return_distance=False) accuracies_trees[i] = np.sum(np.equal(neighbors_approx, neighbors_exact))/n_queries 
############################################################################### # Plot the accuracy variation with `n_candidates` plt.figure() colors = ['c', 'm', 'y'] for i, n_estimators in enumerate(n_estimators_for_candidate_value): label = 'n_estimators = %d ' % n_estimators plt.plot(n_candidates_values, accuracies_c[i, :], 'o-', c=colors[i], label=label) plt.errorbar(n_candidates_values, accuracies_c[i, :], stds_accuracies[i, :], c=colors[i]) plt.legend(loc='upper left', fontsize='small') plt.ylim([0, 1.2]) plt.xlim(min(n_candidates_values), max(n_candidates_values)) plt.ylabel("Accuracy") plt.xlabel("n_candidates") plt.grid(which='both') plt.title("Accuracy variation with n_candidates") # Plot the accuracy variation with `n_estimators` plt.figure() plt.scatter(n_estimators_values, accuracies_trees, c='k') plt.plot(n_estimators_values, accuracies_trees, c='g') plt.ylim([0, 1.2]) plt.xlim(min(n_estimators_values), max(n_estimators_values)) plt.ylabel("Accuracy") plt.xlabel("n_estimators") plt.grid(which='both') plt.title("Accuracy variation with n_estimators") plt.show()
bsd-3-clause
jmargeta/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
8
7108
import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = 
MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): # this test was not actually passing before! raise SkipTest n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dico1 = MiniBatchDictionaryLearning(n_components, n_iter=10, batch_size=1, shuffle=False, dict_init=V, random_state=0).fit(X) dico2 = MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V, random_state=0) for ii, sample in enumerate(X): dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter) # if ii == 1: break assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) == 0)) assert_array_equal(dico1.components_, dico2.components_) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
bsd-3-clause
DistrictDataLabs/yellowbrick
tests/test_model_selection/test_rfecv.py
1
6509
# tests.test_model_selection.test_rfecv # Tests for the RFECV visualizer # # Author: Benjamin Bengfort # Created: Tue Apr 03 17:35:16 2018 -0400 # # Copyright (C) 2018 The scikit-yb developers # For license information, see LICENSE.txt # # ID: test_rfecv.py [a4599db] rebeccabilbro@users.noreply.github.com $ """ Tests for the RFECV visualizer """ ########################################################################## ## Imports ########################################################################## import sys import pytest import numpy as np import numpy.testing as npt from unittest.mock import patch from tests.fixtures import Dataset from tests.base import VisualTestCase from yellowbrick.model_selection.rfecv import * from yellowbrick.datasets import load_occupancy from yellowbrick.exceptions import YellowbrickValueError from sklearn.svm import SVC from sklearn.datasets import make_classification from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import StratifiedKFold from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier try: import pandas as pd except ImportError: pd = None ########################################################################## ## Fixtures ########################################################################## @pytest.fixture(scope="class") def dataset(request): """ Creates a multiclass classification dataset fixture for RFECV """ X, y = make_classification( n_samples=300, n_features=5, n_informative=3, n_repeated=0, n_classes=4, n_clusters_per_class=1, random_state=0, ) dataset = Dataset(X, y) request.cls.dataset = dataset ########################################################################## ## Test Cases ########################################################################## @pytest.mark.usefixtures("dataset") class TestRFECV(VisualTestCase): """ Test the RFECV visualizer """ @patch.object(RFECV, "draw") def test_fit(self, mock_draw): """ Assert that fit returns self and creates expected properties with NB """ X, y = self.dataset params = ( "n_features_", "support_", "ranking_", "cv_scores_", "rfe_estimator_", "n_feature_subsets_", ) rf = RandomForestClassifier() oz = RFECV(rf) for param in params: assert not hasattr(oz, param) # Assert original estimator is wrapped assert oz._wrapped is rf assert oz.fit(X, y) is oz mock_draw.assert_called_once() for param in params: assert hasattr(oz, param) # Assert rfe estimator is now wrapped assert oz._wrapped is not rf assert oz._wrapped is oz.rfe_estimator_ @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows") def test_rfecv_classification(self): """ Test image closeness on a classification dataset with an SVM """ cv = ShuffleSplit(3, random_state=21) oz = RFECV(SVC(kernel="linear", C=1), cv=cv) oz.fit(self.dataset.X, self.dataset.y) oz.finalize() self.assert_images_similar(oz, remove_legend=True) @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows") @pytest.mark.filterwarnings("ignore:F-score is ill-defined") def test_quick_method(self): """ Test the rfecv quick method works with LogisticRegression """ cv = ShuffleSplit(2, random_state=14) model = LogisticRegression() X, y = self.dataset viz = rfecv(model, X, y, step=2, cv=cv, scoring="f1_weighted", show=False) self.assert_images_similar(viz, remove_legend=True) @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows") @pytest.mark.skipif(pd is None, reason="test requires pandas") def 
test_pandas_integration(self): """ Test on a real dataset with pandas DataFrame and Series """ data = load_occupancy(return_dataset=True) X, y = data.to_pandas() # Use only the first 100 samples so the test will run faster X_t = X[:100] y_t = y[:100] assert isinstance(X_t, pd.DataFrame) assert isinstance(y_t, pd.Series) cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=32) oz = RFECV(RandomForestClassifier(random_state=83), cv=cv) oz.fit(X_t, y_t) oz.finalize() self.assert_images_similar(oz, remove_legend=True) @pytest.mark.xfail(sys.platform == "win32", reason="images not close on windows") def test_numpy_integration(self): """ Test on a real dataset with numpy ndarray """ data = load_occupancy(return_dataset=True) X, y = data.to_numpy() # Use only the first 100 samples so the test will run faster X_t = X[:100] y_t = y[:100] assert isinstance(X_t, np.ndarray) assert isinstance(y_t, np.ndarray) cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=32) oz = RFECV(RandomForestClassifier(random_state=83), cv=cv) oz.fit(X_t, y_t) oz.finalize() self.assert_images_similar(oz, remove_legend=True) @pytest.mark.parametrize("step", [0, -1, -5]) def test_invalid_step(self, step): """ Test step hyperparam validation """ with pytest.raises(YellowbrickValueError, match="step must be >0"): oz = RFECV(SVC(kernel="linear"), step=step) oz.fit(self.dataset.X, self.dataset.y) def test_rfecv_step(self): """ Test RFECV step=5 with LogisticRegression """ X, y = make_classification( n_samples=200, n_features=30, n_informative=18, n_redundant=6, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0, ) oz = RFECV(LogisticRegression(random_state=32), step=5).fit(X, y) assert hasattr(oz, "n_feature_subsets_") npt.assert_array_equal(oz.n_feature_subsets_, np.arange(1, 35, 5)) oz.finalize() tol = 1.75 if sys.platform == "win32" else 0.25 self.assert_images_similar(oz, tol=tol, remove_legend=True)
apache-2.0
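
For comparison with the visualizer tests above, here is a plain scikit-learn sketch of recursive feature elimination with cross-validation — the same underlying procedure, without the plotting:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=200, n_features=12, n_informative=5, random_state=0)

# Drop one feature per iteration, scoring each subset with 3-fold stratified CV.
selector = RFECV(LogisticRegression(max_iter=1000), step=1,
                 cv=StratifiedKFold(3), scoring="accuracy")
selector.fit(X, y)
print(selector.n_features_, selector.support_)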
DistrictDataLabs/yellowbrick
yellowbrick/pipeline.py
1
4075
# yellowbrick.pipeline
# Implements a visual pipeline that subclasses Scikit-Learn pipelines.
#
# Author:   Benjamin Bengfort
# Created:  Fri Oct 07 21:41:06 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: pipeline.py [1efae1f] benjamin@bengfort.com $

"""
Implements a visual pipeline that subclasses Scikit-Learn pipelines.
"""

##########################################################################
## Imports
##########################################################################

from os import path

from .base import Visualizer
from .utils.helpers import slugify
from sklearn.pipeline import Pipeline


##########################################################################
## Visual Pipeline
##########################################################################

class VisualPipeline(Pipeline):
    """Pipeline of transforms and visualizers with a final estimator.

    Sequentially apply a list of transforms, visualizers, and a final
    estimator which may be evaluated by additional visualizers.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods. The final estimator only needs
    to implement fit.

    Any step that implements draw or show methods can be called sequentially
    directly from the VisualPipeline, allowing multiple visual diagnostics to
    be generated, displayed, and saved on demand. If draw or show is not
    called, the visual pipeline should be equivalent to the simple pipeline
    to ensure no reduction in performance.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters. These steps
    can be visually diagnosed by visualizers at every point in the pipeline.

    Parameters
    ----------
    steps : list
        List of (name, transform) tuples (implementing fit/transform) that
        are chained, in the order in which they are chained, with the last
        object an estimator. Any intermediate step can be a FeatureVisualizer
        and the last step can be a ScoreVisualizer.

    Attributes
    ----------
    named_steps : dict
        Read-only attribute to access any step parameter by user given name.
        Keys are step names and values are step parameters.

    visual_steps : dict
        Read-only attribute to access any visualizer in the pipeline by user
        given name. Keys are step names and values are visualizer steps.
    """

    @property
    def visual_steps(self):
        return dict(step for step in self.steps if isinstance(step[1], Visualizer))

    def show(self, outdir=None, ext=".pdf", **kwargs):
        """
        A single entry point to rendering all visualizations in the visual
        pipeline. The rendering for the output depends on the backend
        context, but for path based renderings (e.g. saving to a file),
        specify a directory and extension to compose an outpath to save each
        visualization (file names will be based on the named step).

        Parameters
        ----------
        outdir : path
            The directory to save visualizations to.

        ext : string, default = ".pdf"
            The extension of the file to save the visualization to.

        kwargs : dict
            Keyword arguments to pass to the ``show()`` method of all steps.
        """
        axes = []
        for name, step in self.visual_steps.items():
            if outdir is not None:
                outpath = path.join(outdir, slugify(name) + ext)
            else:
                outpath = None

            ax = step.show(outpath=outpath, **kwargs)
            axes.append(ax)

        # Return axes array to ensure figures are shown in notebook
        return axes

    def fit_transform_show(self, X, y=None, outpath=None, **kwargs):
        """
        Fit the model and transforms and then call show.
        """
        Xp = self.fit_transform(X, y, **kwargs)
        self.show(outpath, **kwargs)
        return Xp
apache-2.0
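
A toy sketch of the step-filtering idea behind the visual_steps property, using only scikit-learn objects (PCA stands in for a Visualizer here; the real property filters on the Visualizer base class):

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression

pipe = Pipeline([
    ("scale", StandardScaler()),
    ("pca", PCA(n_components=2)),
    ("clf", LogisticRegression()),
])

# Same dict-building pattern as visual_steps, keyed by the user-given step name.
pca_steps = dict(step for step in pipe.steps if isinstance(step[1], PCA))
print(pca_steps)  # {'pca': PCA(n_components=2)}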
linrio/WhetherOrNotMe
testme.py
1
2028
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf


def read_data(img_path, image_h=64, image_w=64):
    image_data = []
    label_data = []
    image = cv2.imread(img_path)
    # cv2.namedWindow("Image")
    # cv2.imshow("Image", image)
    # cv2.waitKey(0)
    h, w, _ = image.shape
    longest_edge = max(h, w)
    top, bottom, left, right = (0, 0, 0, 0)
    dh, dw = (0, 0)
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    else:
        pass
    image_pad = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    image = cv2.resize(image_pad, (image_h, image_w))
    image_data.append(image)
    label_data.append(img_path)
    image_data = np.array(image_data)

    train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05,
                                                        random_state=random.randint(0, 100))

    X = tf.placeholder(tf.float32, [None, 64, 64, 3])
    Y = tf.placeholder(tf.float32, [None, 2])
    return Y


# img_path = '4833.jpg'
# print(read_data(img_path))

x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([0.100, 0.200], x_data) + 0.300

b = tf.Variable(tf.zeros([1]), name='B')
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0), name='W')
y = tf.add(tf.matmul(W, x_data, name='MatMul'), b, name='add')

loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data, name='Sub'), name='Square'), name='ReduceMean')
optimizer = tf.train.GradientDescentOptimizer(0.001, name='Optimizer')
train = optimizer.minimize(loss, name='minimize')

summaries = [tf.summary.histogram('W', W), tf.summary.histogram('b', b), tf.summary.scalar('loss', loss)]
summary_op = tf.summary.merge(summaries)
print(summary_op)
apache-2.0
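
A numpy-only sketch (a hypothetical helper, not from the file above) of the pad-to-square step that read_data performs with cv2.copyMakeBorder before resizing:

import numpy as np

def pad_to_square(image):
    # Pad the shorter side with black pixels so height == width, keeping the image centred.
    h, w = image.shape[:2]
    longest = max(h, w)
    dh, dw = longest - h, longest - w
    top, left = dh // 2, dw // 2
    return np.pad(image, ((top, dh - top), (left, dw - left), (0, 0)), mode='constant')

print(pad_to_square(np.ones((40, 64, 3), dtype=np.uint8)).shape)  # (64, 64, 3)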
msimacek/samba
third_party/dnspython/examples/zonediff.py
79
10711
#!/usr/bin/env python # # Small library and commandline tool to do logical diffs of zonefiles # ./zonediff -h gives you help output # # Requires dnspython to do all the heavy lifting # # (c)2009 Dennis Kaarsemaker <dennis@kaarsemaker.net> # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """See diff_zones.__doc__ for more information""" __all__ = ['diff_zones', 'format_changes_plain', 'format_changes_html'] try: import dns.zone except ImportError: import sys sys.stderr.write("Please install dnspython") sys.exit(1) def diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False): """diff_zones(zone1, zone2, ignore_ttl=False, ignore_soa=False) -> changes Compares two dns.zone.Zone objects and returns a list of all changes in the format (name, oldnode, newnode). If ignore_ttl is true, a node will not be added to this list if the only change is its TTL. If ignore_soa is true, a node will not be added to this list if the only changes is a change in a SOA Rdata set. The returned nodes do include all Rdata sets, including unchanged ones. """ changes = [] for name in zone1: name = str(name) n1 = zone1.get_node(name) n2 = zone2.get_node(name) if not n2: changes.append((str(name), n1, n2)) elif _nodes_differ(n1, n2, ignore_ttl, ignore_soa): changes.append((str(name), n1, n2)) for name in zone2: n1 = zone1.get_node(name) if not n1: n2 = zone2.get_node(name) changes.append((str(name), n1, n2)) return changes def _nodes_differ(n1, n2, ignore_ttl, ignore_soa): if ignore_soa or not ignore_ttl: # Compare datasets directly for r in n1.rdatasets: if ignore_soa and r.rdtype == dns.rdatatype.SOA: continue if r not in n2.rdatasets: return True if not ignore_ttl: return r.ttl != n2.find_rdataset(r.rdclass, r.rdtype).ttl for r in n2.rdatasets: if ignore_soa and r.rdtype == dns.rdatatype.SOA: continue if r not in n1.rdatasets: return True else: return n1 != n2 def format_changes_plain(oldf, newf, changes, ignore_ttl=False): """format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str Given 2 filenames and a list of changes from diff_zones, produce diff-like output. 
If ignore_ttl is True, TTL-only changes are not displayed""" ret = "--- %s\n+++ %s\n" % (oldf, newf) for name, old, new in changes: ret += "@ %s\n" % name if not old: for r in new.rdatasets: ret += "+ %s\n" % str(r).replace('\n','\n+ ') elif not new: for r in old.rdatasets: ret += "- %s\n" % str(r).replace('\n','\n+ ') else: for r in old.rdatasets: if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl): ret += "- %s\n" % str(r).replace('\n','\n+ ') for r in new.rdatasets: if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl): ret += "+ %s\n" % str(r).replace('\n','\n+ ') return ret def format_changes_html(oldf, newf, changes, ignore_ttl=False): """format_changes(oldfile, newfile, changes, ignore_ttl=False) -> str Given 2 filenames and a list of changes from diff_zones, produce nice html output. If ignore_ttl is True, TTL-only changes are not displayed""" ret = '''<table class="zonediff"> <thead> <tr> <th>&nbsp;</th> <th class="old">%s</th> <th class="new">%s</th> </tr> </thead> <tbody>\n''' % (oldf, newf) for name, old, new in changes: ret += ' <tr class="rdata">\n <td class="rdname">%s</td>\n' % name if not old: for r in new.rdatasets: ret += ' <td class="old">&nbsp;</td>\n <td class="new">%s</td>\n' % str(r).replace('\n','<br />') elif not new: for r in old.rdatasets: ret += ' <td class="old">%s</td>\n <td class="new">&nbsp;</td>\n' % str(r).replace('\n','<br />') else: ret += ' <td class="old">' for r in old.rdatasets: if r not in new.rdatasets or (r.ttl != new.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl): ret += str(r).replace('\n','<br />') ret += '</td>\n' ret += ' <td class="new">' for r in new.rdatasets: if r not in old.rdatasets or (r.ttl != old.find_rdataset(r.rdclass, r.rdtype).ttl and not ignore_ttl): ret += str(r).replace('\n','<br />') ret += '</td>\n' ret += ' </tr>\n' return ret + ' </tbody>\n</table>' # Make this module usable as a script too. if __name__ == '__main__': import optparse import subprocess import sys import traceback usage = """%prog zonefile1 zonefile2 - Show differences between zones in a diff-like format %prog [--git|--bzr|--rcs] zonefile rev1 [rev2] - Show differences between two revisions of a zonefile The differences shown will be logical differences, not textual differences. 
""" p = optparse.OptionParser(usage=usage) p.add_option('-s', '--ignore-soa', action="store_true", default=False, dest="ignore_soa", help="Ignore SOA-only changes to records") p.add_option('-t', '--ignore-ttl', action="store_true", default=False, dest="ignore_ttl", help="Ignore TTL-only changes to Rdata") p.add_option('-T', '--traceback', action="store_true", default=False, dest="tracebacks", help="Show python tracebacks when errors occur") p.add_option('-H', '--html', action="store_true", default=False, dest="html", help="Print HTML output") p.add_option('-g', '--git', action="store_true", default=False, dest="use_git", help="Use git revisions instead of real files") p.add_option('-b', '--bzr', action="store_true", default=False, dest="use_bzr", help="Use bzr revisions instead of real files") p.add_option('-r', '--rcs', action="store_true", default=False, dest="use_rcs", help="Use rcs revisions instead of real files") opts, args = p.parse_args() opts.use_vc = opts.use_git or opts.use_bzr or opts.use_rcs def _open(what, err): if isinstance(what, basestring): # Open as normal file try: return open(what, 'rb') except: sys.stderr.write(err + "\n") if opts.tracebacks: traceback.print_exc() else: # Must be a list, open subprocess try: proc = subprocess.Popen(what, stdout=subprocess.PIPE) proc.wait() if proc.returncode == 0: return proc.stdout sys.stderr.write(err + "\n") except: sys.stderr.write(err + "\n") if opts.tracebacks: traceback.print_exc() if not opts.use_vc and len(args) != 2: p.print_help() sys.exit(64) if opts.use_vc and len(args) not in (2,3): p.print_help() sys.exit(64) # Open file desriptors if not opts.use_vc: oldn, newn = args else: if len(args) == 3: filename, oldr, newr = args oldn = "%s:%s" % (oldr, filename) newn = "%s:%s" % (newr, filename) else: filename, oldr = args newr = None oldn = "%s:%s" % (oldr, filename) newn = filename old, new = None, None oldz, newz = None, None if opts.use_bzr: old = _open(["bzr", "cat", "-r" + oldr, filename], "Unable to retrieve revision %s of %s" % (oldr, filename)) if newr != None: new = _open(["bzr", "cat", "-r" + newr, filename], "Unable to retrieve revision %s of %s" % (newr, filename)) elif opts.use_git: old = _open(["git", "show", oldn], "Unable to retrieve revision %s of %s" % (oldr, filename)) if newr != None: new = _open(["git", "show", newn], "Unable to retrieve revision %s of %s" % (newr, filename)) elif opts.use_rcs: old = _open(["co", "-q", "-p", "-r" + oldr, filename], "Unable to retrieve revision %s of %s" % (oldr, filename)) if newr != None: new = _open(["co", "-q", "-p", "-r" + newr, filename], "Unable to retrieve revision %s of %s" % (newr, filename)) if not opts.use_vc: old = _open(oldn, "Unable to open %s" % oldn) if not opts.use_vc or newr == None: new = _open(newn, "Unable to open %s" % newn) if not old or not new: sys.exit(65) # Parse the zones try: oldz = dns.zone.from_file(old, origin = '.', check_origin=False) except dns.exception.DNSException: sys.stderr.write("Incorrect zonefile: %s\n", old) if opts.tracebacks: traceback.print_exc() try: newz = dns.zone.from_file(new, origin = '.', check_origin=False) except dns.exception.DNSException: sys.stderr.write("Incorrect zonefile: %s\n" % new) if opts.tracebacks: traceback.print_exc() if not oldz or not newz: sys.exit(65) changes = diff_zones(oldz, newz, opts.ignore_ttl, opts.ignore_soa) changes.sort() if not changes: sys.exit(0) if opts.html: print format_changes_html(oldn, newn, changes, opts.ignore_ttl) else: print format_changes_plain(oldn, newn, changes, 
opts.ignore_ttl) sys.exit(1)
gpl-3.0
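A minimal usage sketch for the zone-diff helpers in the record above, not part of the original file: it assumes dnspython is available and that diff_zones and format_changes_plain are imported from that module; the zone text, origin and filenames are invented for illustration, and diff_zones is called with the same positional arguments the script's __main__ block uses.

import dns.zone

old_text = """$TTL 300
@ IN SOA ns1 hostmaster 1 3600 900 604800 86400
@ IN NS ns1
www IN A 192.0.2.1
"""
new_text = old_text.replace("192.0.2.1", "192.0.2.2")

# Parse both versions of the zone; check_origin=False mirrors the script above.
oldz = dns.zone.from_text(old_text, origin="example.com.", check_origin=False)
newz = dns.zone.from_text(new_text, origin="example.com.", check_origin=False)

# diff_zones(old, new, ignore_ttl, ignore_soa), as called in the __main__ block.
changes = diff_zones(oldz, newz, False, False)
changes.sort()
print(format_changes_plain("old.zone", "new.zone", changes))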
woobe/h2o
py/testdir_ec2/test_rf_covtype.py
2
2590
import unittest, time, sys sys.path.extend(['.','..','py']) import h2o, h2o_cmd, h2o_hosts, h2o_rf # RF train parameters paramsTrainRF = { 'ntree' : 100, 'depth' : 300, 'bin_limit' : 20000, 'stat_type' : 'ENTROPY', 'out_of_bag_error_estimate': 1, 'exclusive_split_limit' : 0, 'timeoutSecs': 14800, } # RF test parameters paramsScoreRF = { # scoring requires the response_variable. it defaults to last, so normally # we don't need to specify. But put this here and (above if used) # in case a dataset doesn't use last col 'response_variable': None, 'timeoutSecs': 14800, 'out_of_bag_error_estimate': 0, } trainDS = { 's3bucket' : 'h2o-datasets', 'filename' : 'covtype.data', 'timeoutSecs' : 14800, 'header' : 1 } scoreDS = { 's3bucket' : 'h2o-datasets', 'filename' : 'covtype.data', 'timeoutSecs' : 14800, 'header' : 1 } PARSE_TIMEOUT=14800 class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o_hosts.build_cloud_with_hosts() @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def parseS3File(self, s3bucket, filename, **kwargs): start = time.time() parseResult = h2o_cmd.parseS3File(bucket=s3bucket, filename=filename, **kwargs) parse_time = time.time() - start h2o.verboseprint("py-S3 parse took {0} sec".format(parse_time)) parseResult['python_call_timer'] = parse_time return parseResult def loadTrainData(self): kwargs = trainDS.copy() trainKey = self.parseS3File(**kwargs) return trainKey def loadScoreData(self): kwargs = scoreDS.copy() scoreKey = self.parseS3File(**kwargs) return scoreKey def test_RF(self): trainKey = self.loadTrainData() kwargs = paramsTrainRF.copy() trainResult = h2o_rf.trainRF(trainKey, **kwargs) scoreKey = self.loadScoreData() kwargs = paramsScoreRF.copy() scoreResult = h2o_rf.scoreRF(scoreKey, trainResult, **kwargs) print "\nTrain\n=========={0}".format(h2o_rf.pp_rf_result(trainResult)) print "\nScoring\n========={0}".format(h2o_rf.pp_rf_result(scoreResult)) if __name__ == '__main__': h2o.unit_main()
apache-2.0
woobe/h2o
py/testdir_single_jvm/test_quant_cols.py
2
6079
import unittest, random, sys, time, re, getpass sys.path.extend(['.','..','py']) import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util import h2o_print as h2p, h2o_gbm, h2o_summ DO_PLOT = getpass.getuser()=='kevin' DO_MEDIAN = False MAX_QBINS = 1000 MULTI_PASS = 1 class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED, localhost SEED = h2o.setup_random_seed() localhost = h2o.decide_if_localhost() if (localhost): h2o.build_cloud(1,java_heap_GB=14) else: h2o_hosts.build_cloud_with_hosts() @classmethod def tearDownClass(cls): # h2o.sleep(3600) h2o.tear_down_cloud() def test_quant_cols(self): h2o.beta_features = True SYNDATASETS_DIR = h2o.make_syn_dir() if getpass.getuser()=='kevin': tryList = [ (None, '/home/kevin/Downloads/t.csv', 15, 11, 'cE', 300), ('home-0xdiag-datasets', 'airlines/year2013.csv', None, None, 'cE', 300), ] else: tryList = [ ('home-0xdiag-datasets', 'airlines/year2013.csv', None, None, 'cE', 300), ] # h2b.browseTheCloud() trial = 0 for (bucket, csvPathname, iColCount, oColCount, hex_key, timeoutSecs) in tryList: xList = [] eList = [] fList = [] # PARSE******************************************************* parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=200, doSummary=False) csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True) print "Parse result['destination_key']:", parseResult['destination_key'] inspect = h2o_cmd.runInspect(key=parseResult['destination_key']) h2o_cmd.infoFromInspect(inspect, csvPathname) numRows = inspect['numRows'] numCols = inspect['numCols'] if not oColCount: iColCount = 0 if not oColCount: oColCount = numCols colCount = iColCount + oColCount for i in range (0,numCols): print "Column", i, "summary" h2o_cmd.runSummary(key=hex_key, max_qbins=1, cols=i); # print h2o.dump_json(inspect) levels = h2o.nodes[0].levels(source=hex_key) # print "levels result:", h2o.dump_json(levels) (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \ h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False) # error if any col has constant values if len(constantValuesDict) != 0: # raise Exception("Probably got a col NA'ed and constant values as a result %s" % constantValuesDict) print "Probably got a col NA'ed and constant values as a result %s" % constantValuesDict # start after the last input col levels = h2o.nodes[0].levels(source=hex_key); l = levels['levels'] for column in range(iColCount, iColCount+oColCount): if l[column]: print "Skipping", column, "because it's enum (says levels)" continue # QUANTILE******************************************************* quantile = 0.5 if DO_MEDIAN else .999 # first output col. always fed by an exec cut, so 0? start = time.time() # file has headers. use col index q = h2o.nodes[0].quantiles(source_key=hex_key, column=column, quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=1) qresult = q['result'] h2p.red_print("result:", q['result'], "quantile", quantile, "interpolated:", q['interpolated'], "iterations", q['iterations']) elapsed = time.time() - start print "quantile end on ", hex_key, 'took', elapsed, 'seconds.' 
quantileTime = elapsed # don't do for enums # also get the median with a sort (h2o_summ.percentileOnSortedlist() if 1==1: h2o_summ.quantile_comparisons( csvPathnameFull, skipHeader=True, col=column, # what col to extract from the csv datatype='float', quantile=0.5 if DO_MEDIAN else 0.999, # h2oSummary2=pctile[5 if DO_MEDIAN else 10], # h2oQuantilesApprox=qresult_single, h2oQuantilesExact=qresult, use_genfromtxt=True, ) trial += 1 execTime = 0 xList.append(column) eList.append(execTime) fList.append(quantileTime) # remove all keys******************************************************* # what about hex_key? if 1==0: start = time.time() h2o.nodes[0].remove_all_keys() elapsed = time.time() - start print "remove all keys end on took", elapsed, 'seconds.' #**************************************************************** # PLOTS. look for eplot.jpg and fplot.jpg in local dir? if DO_PLOT: xLabel = 'column (0 is first)' eLabel = 'exec cut time' fLabel = 'quantile time' eListTitle = "" fListTitle = "" h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel, server=True) if __name__ == '__main__': h2o.unit_main()
apache-2.0
anonymous-ijcai/dsw-ont-ijcai
dswont/util.py
1
3961
import json import nltk import numpy as np import os import re import shutil from sklearn import metrics def resource(filename): """Returns the absolute path to the resource file.""" current_dir = os.path.join(os.getcwd(), 'dswont') resource_dir = os.path.join(current_dir, "resources") return os.path.join(resource_dir, filename) class PersistentDict(dict): def __init__(self, filename, *args, **kwds): self.filename = filename if os.access(filename, os.R_OK): fileobj = open(filename, 'r') with fileobj: self.load(fileobj) dict.__init__(self, *args, **kwds) def sync(self): 'Write dict to disk' filename = self.filename tempname = filename + '.tmp' fileobj = open(tempname, 'w') try: self.dump(fileobj) except Exception: os.remove(tempname) raise finally: fileobj.close() shutil.move(tempname, self.filename) # atomic commit def close(self): self.sync() def __enter__(self): return self def __exit__(self, *exc_info): self.close() def dump(self, fileobj): json.dump(self, fileobj, separators=(',', ':')) def load(self, fileobj): fileobj.seek(0) try: return self.update(json.load(fileobj)) except Exception: logger.warn('Exception while loading the file: ' + e) stopwords = {line.rstrip() for line in open(resource('en-stopwords.txt')).readlines()} words_re = re.compile('\w+') def pos_tag(string_or_tokens: str): if isinstance(string_or_tokens, str): string = string_or_tokens tokens = nltk.tokenize.word_tokenize(string) else: tokens = string_or_tokens return nltk.pos_tag(tokens) def head_word_pos(phrase_or_pos_tagging): if isinstance(phrase_or_pos_tagging, str): phrase = phrase_or_pos_tagging phrase_wo_parens = re.sub('\s\(.*\)', '', phrase) pos_tagging = pos_tag(phrase_wo_parens) else: pos_tagging = phrase_or_pos_tagging pos_to_skip = {"VBN", "VBD"#, "CD" } delimiting_pos = {"DT", "IN"} delimiting_words = {"("} result = None for word, pos in pos_tagging: current_word_is_delimiter = pos in delimiting_pos or word in delimiting_words nondelimiters_encountered = result is not None if current_word_is_delimiter and nondelimiters_encountered: break elif pos not in pos_to_skip: result = word, pos if not result: raise Exception("Could not find the head word of the phrase '{}'".format(pos_tagging)) return result def measure_params_ytrue_ypred_(*params): if len(params) == 2: y_true, y_pred = params elif len(params) == 3: estimator, X, y_true = params y_pred = estimator.predict(X) else: raise ValueError( "weighted_f1 called with {} parameters, \ the correct signature is:\n weighted_f1(y_true, y_pred) \ or weighted_f1(estimator, X, y_true).".format(len(params))) return np.array(y_true, dtype=bool), np.array(y_pred, dtype=bool) def weighted_f1(*params): y_true, y_pred = measure_params_ytrue_ypred_(*params) score1 = metrics.f1_score(y_true, y_pred, pos_label=1) score2 = metrics.f1_score(y_true, y_pred, pos_label=0) nones = sum(y_true) nzeros = len(y_true) - nones return (nones * score1 + nzeros * score2) / (nones + nzeros) def f1_pos_class(*params): y_true, y_pred = measure_params_ytrue_ypred_(*params) return metrics.f1_score(y_true, y_pred, pos_label=1) def f1_neg_class(*params): y_true, y_pred = measure_params_ytrue_ypred_(*params) return metrics.f1_score(y_true, y_pred, pos_label=0) def accuracy_score(*params): y_true, y_pred = measure_params_ytrue_ypred_(*params) return metrics.accuracy_score(y_true, y_pred)
gpl-3.0
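An illustrative sketch of the PersistentDict class in the record above, not part of the original file: it assumes the module is importable as dswont.util, and the cache filename is invented. The dict is reloaded from JSON in the constructor when the file is readable, and written back atomically (temp file plus move) when the context manager exits.

from dswont.util import PersistentDict

# First run: store a value; sync() persists it on context-manager exit.
with PersistentDict("cache.json") as cache:
    cache["answer"] = 42

# Later run: the constructor reloads the JSON file if it exists and is readable.
with PersistentDict("cache.json") as cache:
    print(cache.get("answer"))  # -> 42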
ningchi/scikit-learn
sklearn/ensemble/tests/test_forest.py
14
34860
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import product import numpy as np from scipy.sparse import csr_matrix, csc_matrix, coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.grid_search import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.validation import check_random_state from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): # Check consistency on dataset iris. 
ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): # Check consistency on dataset boston house prices. ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", )): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): # Regression models should not have a classes_ attribute. r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): # Predict probabilities. ForestClassifier = FOREST_CLASSIFIERS[name] with np.errstate(divide="ignore"): clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_probability(): for name in FOREST_CLASSIFIERS: yield check_probability, name def check_importances(name, X, y): # Check variable importances. 
ForestClassifier = FOREST_CLASSIFIERS[name] for n_jobs in [1, 2]: clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) X_new = clf.transform(X, threshold="mean") assert_less(0 < X_new.shape[1], X.shape[1]) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=sample_weight) importances = clf.feature_importances_ assert_true(np.all(importances >= 0.0)) clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=3 * sample_weight) importances_bis = clf.feature_importances_ assert_almost_equal(importances, importances_bis) def test_importances(): X, y = datasets.make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name in FOREST_CLASSIFIERS: yield check_importances, name, X, y def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): # Check that oob prediction is a good estimation of the generalization # error. # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): # Check that base trees can be grid-searched. 
for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): # Check pickability. ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): # Check estimators on multi-output problems. X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): # Test that n_classes_ and classes_ have proper shape. ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): ''' Test that the `sparse_output` parameter of RandomTreesEmbedding works by returning a dense array. ''' # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): ''' Test that the `sparse_output` parameter of RandomTreesEmbedding works by returning the same array for both argument values. 
''' # Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. 
assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name, X, y): # Test precedence of max_leaf_nodes over max_depth. ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name, X, y def check_min_samples_leaf(name, X, y): # Test if leaves contain more than leaf_count training examples ForestEstimator = FOREST_ESTIMATORS[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): est = ForestEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def test_min_samples_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name, X, y def check_min_weight_fraction_leaf(name, X, y): # Test if leaves contain at least min_weight_fraction_leaf of the # training set ForestEstimator = FOREST_ESTIMATORS[name] rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) if isinstance(est, (RandomForestClassifier, RandomForestRegressor)): est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name, X, y def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if 
name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0, n_samples=40) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): # Check that it works no matter the memory layout est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y) est = ForestEstimator(random_state=0) est.fit(X_2d, y) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_raises(ValueError, est.predict, X) def test_1d_input(): X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target for name in FOREST_ESTIMATORS: yield check_1d_input, name, X, X_2d, y def check_class_weights(name): # Check class_weights resemble sample_weights behavior. 
ForestClassifier = FOREST_CLASSIFIERS[name] # Iris is balanced, so no effect expected for using 'auto' weights clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = ForestClassifier(class_weight='auto', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = ForestClassifier(class_weight='auto', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight**2) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in FOREST_CLASSIFIERS: yield check_class_weights, name def check_class_weight_auto_and_bootstrap_multi_output(name): # Test class_weight works for multi-output ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(class_weight='auto', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}], random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight='subsample', random_state=0) clf.fit(X, _y) def test_class_weight_auto_and_bootstrap_multi_output(): for name in FOREST_CLASSIFIERS: yield check_class_weight_auto_and_bootstrap_multi_output, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. 
ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = ForestClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Warning warm_start with preset clf = ForestClassifier(class_weight='auto', warm_start=True, random_state=0) assert_warns(UserWarning, clf.fit, X, y) assert_warns(UserWarning, clf.fit, X, _y) # Not a list or preset for multi-output clf = ForestClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in FOREST_CLASSIFIERS: yield check_class_weight_errors, name def check_warm_start(name, random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): # Test if fit clears state and grows a new forest when warm_start==False. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): # Test if warm start second fit with smaller n_estimators raises error. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): # Test if warm start with equal n_estimators does nothing and returns the # same forest and raises a warning. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. 
clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): # Test that the warm start computes oob score when asked. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name if __name__ == "__main__": import nose nose.runmodule()
bsd-3-clause
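For readers skimming the forest test module above, a small self-contained sketch of the scikit-learn API those tests exercise (fit, predict, apply, feature_importances_); the toy data mirrors the X/y defined at the top of the test file, and the expected outputs are only indicative.

from sklearn.ensemble import RandomForestClassifier

X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]

clf = RandomForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)

print(clf.predict([[-1, -1], [2, 2], [3, 2]]))  # expected: [-1, 1, 1]
print(clf.apply(X).shape)                       # leaf indices, (n_samples, n_estimators)
print(clf.feature_importances_.shape)           # (n_features,)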
FreekingDean/home-assistant
homeassistant/components/android_ip_webcam/switch.py
22
2732
"""Support for Android IP Webcam settings.""" from homeassistant.components.switch import SwitchEntity from . import ( CONF_HOST, CONF_NAME, CONF_SWITCHES, DATA_IP_WEBCAM, ICON_MAP, KEY_MAP, AndroidIPCamEntity, ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the IP Webcam switch platform.""" if discovery_info is None: return host = discovery_info[CONF_HOST] name = discovery_info[CONF_NAME] switches = discovery_info[CONF_SWITCHES] ipcam = hass.data[DATA_IP_WEBCAM][host] all_switches = [] for setting in switches: all_switches.append(IPWebcamSettingsSwitch(name, host, ipcam, setting)) async_add_entities(all_switches, True) class IPWebcamSettingsSwitch(AndroidIPCamEntity, SwitchEntity): """An abstract class for an IP Webcam setting.""" def __init__(self, name, host, ipcam, setting): """Initialize the settings switch.""" super().__init__(host, ipcam) self._setting = setting self._mapped_name = KEY_MAP.get(self._setting, self._setting) self._name = f"{name} {self._mapped_name}" self._state = False @property def name(self): """Return the name of the node.""" return self._name async def async_update(self): """Get the updated status of the switch.""" self._state = bool(self._ipcam.current_settings.get(self._setting)) @property def is_on(self): """Return the boolean response if the node is on.""" return self._state async def async_turn_on(self, **kwargs): """Turn device on.""" if self._setting == "torch": await self._ipcam.torch(activate=True) elif self._setting == "focus": await self._ipcam.focus(activate=True) elif self._setting == "video_recording": await self._ipcam.record(record=True) else: await self._ipcam.change_setting(self._setting, True) self._state = True self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn device off.""" if self._setting == "torch": await self._ipcam.torch(activate=False) elif self._setting == "focus": await self._ipcam.focus(activate=False) elif self._setting == "video_recording": await self._ipcam.record(record=False) else: await self._ipcam.change_setting(self._setting, False) self._state = False self.async_write_ha_state() @property def icon(self): """Return the icon for the switch.""" return ICON_MAP.get(self._setting, "mdi:flash")
apache-2.0
ai-ku/uwsd
run/mapping.py
1
5901
#! /usr/bin/python # -*- coding: utf-8 -*- __author__ = "Osman Baskaya" import sys from collections import defaultdict as dd import numpy as np import random from sklearn.preprocessing import normalize random.seed(42) def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i+n] def load_key(fname): print >> sys.stderr, "loading %s" % fname d = dd(lambda: dd(lambda: dd(lambda : 0.))) lines = open(fname).readlines() #c = 0 for line in lines: line = line.split() key, inst = line[:2] senses = line[2:] senses = [sense.split('/') for sense in senses] if len(senses) == 1: #c += 1 d[key][inst][senses[0][0]] = 1. else: uni = [] for sense in senses: if len(sense) == 1: uni.append(sense) else: d[key][inst][sense[0]] = float(sense[1]) if len(uni) > 0: assert len(uni) != len(senses), "Some sense weighted, some not: %s" % inst val = 1. / len(uni) for sense in senses: d[key][inst][sense[0]] = val return d def remap(gold_instances, test_instances, training_instances): # removing instances that excluded from gold data (e.g. SemEval 2013) difference = set(test_instances.keys()).difference(set(gold_instances)) map(test_instances.pop, difference) test_ids = [] gold_ids = [] for instance_id in training_instances: gs_perception = gold_instances[instance_id] ts_perception = test_instances[instance_id] if gs_perception is not None and ts_perception is not None: test_ids.extend(ts_perception.keys()) gold_ids.extend(gs_perception.keys()) gold_ids = set(gold_ids) test_ids = set(test_ids) m = len(test_ids) n = len(gold_ids) # for matrix indexing. each different sense gets the index if m != 0 and n != 0: test_sense_ids = dict(zip(test_ids, range(m))) gold_sense_ids = dict(zip(gold_ids, range(n))) #print "test senses - ids", test_sense_ids #print "gold senses - ids:", gold_sense_ids #print mapping_matrix = np.zeros([m, n]) for instance_id in training_instances: gs_perception = gold_instances[instance_id] ts_perception = test_instances[instance_id] for key, val in ts_perception.iteritems(): ts_ind = test_sense_ids[key] #print ts_ind, key, val, "\t", for gold_key, gold_val in gs_perception.iteritems(): gs_ind = gold_sense_ids[gold_key] #print gs_ind, gold_key, gold_val score = gold_val * val mapping_matrix[ts_ind, gs_ind] += score #print mapping_matrix # Normalize the matrix mapping_matrix = normalize(mapping_matrix, norm='l1', axis=1) #print "After normalization\n", mapping_matrix #print "all instances:", test_instances.keys() #print "training instances:", training_instances test_inst_ids = set(test_instances.keys()).difference(training_instances) #print "test instance ids:", test_inst_ids remapped = dict() for test_inst_id in test_inst_ids: test_vector = np.zeros(mapping_matrix.shape[0]) ts_perception = test_instances[test_inst_id] for key, col in test_sense_ids.iteritems(): if key in ts_perception: test_vector[col] = ts_perception[key] result = np.dot(test_vector, mapping_matrix) mapped = [(sense, result[ind]) for sense, ind in gold_sense_ids.iteritems() if result[ind] != 0] if len(mapped) > 0: remapped[test_inst_id] = dict(mapped) else: print >> sys.stderr, "problem for %s" % test_inst_id return remapped def print_as_ans_key(lemma, d, one_sense=True): def instance_compare(tt): return int(tt[0].split('.')[-1]) for inst_id, sense_dict in sorted(d.iteritems(), key=instance_compare): print "{0} {1}".format(lemma, inst_id), # sort so that first sense has the maximum degree sorted_senses = sorted(sense_dict.iteritems(), key=lambda x: x[1], reverse=True) if one_sense: 
sorted_senses = sorted_senses[0:1] print " ".join(["{0}/{1}".format(*s) for s in sorted_senses]) def run_eval(lemma, goldkey, testkey, test_sets, all_instances): all_instances = set(all_instances) all_chunks = {} for test_instances in test_sets: training_instances = all_instances.difference(test_instances) remapped_testkey = remap(goldkey, testkey, training_instances) if len(training_instances) != 0: all_chunks.update(remapped_testkey) print_as_ans_key(lemma, all_chunks) goldkey = load_key(sys.argv[1]) testkey = load_key(sys.argv[2]) #NUMBER_OF_CHUNKS = 5 for lemma, inst_dict in sorted(goldkey.iteritems()): all_instances = inst_dict.keys() random.shuffle(all_instances) NUMBER_OF_CHUNKS = len(all_instances) # leave-one-out-cross-validation test_sets = list(chunks(all_instances, len(all_instances) / NUMBER_OF_CHUNKS)) # if division not exact, add all instances in last chunk to previous so that # we have NUMBER_OF_CHUNKS chunks. if len(test_sets) == NUMBER_OF_CHUNKS+1: test_sets[NUMBER_OF_CHUNKS-1].extend(test_sets[NUMBER_OF_CHUNKS]) test_sets.pop() test_sets = [set(t) for t in test_sets] if len(testkey[lemma]) != 0: run_eval(lemma, goldkey[lemma], testkey[lemma], test_sets, all_instances)
mit
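A quick illustration, not part of the record above, of its chunks() helper, which the script uses to build cross-validation test sets; when NUMBER_OF_CHUNKS equals the number of instances this degenerates to leave-one-out, as the script notes.

data = list(range(10))
folds = list(chunks(data, 3))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
# The script merges a short trailing chunk into the previous one so that
# exactly NUMBER_OF_CHUNKS folds remain.
print(folds)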
neurospin/pylearn-epac
epac/sklearn_plugins/estimators.py
1
11147
""" Estimator wrap ML procedure into EPAC Node. To be EPAC compatible, one should inherit from BaseNode and implement the "transform" method. InternalEstimator and LeafEstimator aim to provide automatic wrapper to objects that implement fit and predict methods. @author: edouard.duchesnay@cea.fr @author: jinpeng.li@cea.fr """ ## Abreviations ## tr: train ## te: test from epac.utils import _func_get_args_names from epac.utils import train_test_merge from epac.utils import train_test_split from epac.utils import _dict_suffix_keys from epac.utils import _sub_dict, _as_dict from epac.configuration import conf from epac.workflow.wrappers import Wrapper class Estimator(Wrapper): """Estimator Wrapper: Automatically connect wrapped_node.fit and wrapped_node.transform to BaseNode.transform Parameters ---------- wrapped_node: any class with fit and transform or fit and predict functions any class implementing fit and transform or implementing fit and predict in_args_fit: list of strings names of input arguments of the fit method. If missing, discover it automatically. in_args_transform: list of strings names of input arguments of the transform method. If missing, discover it automatically. in_args_predict: list of strings names of input arguments of the predict method. If missing, discover it automatically Example ------- >>> from sklearn.lda import LDA >>> from sklearn import datasets >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.svm import SVC >>> from epac import Pipe >>> from epac import CV, Methods >>> from epac.sklearn_plugins import Estimator >>> >>> X, y = datasets.make_classification(n_samples=15, ... n_features=10, ... n_informative=7, ... random_state=5) >>> Xy = dict(X=X, y=y) >>> lda_estimator = Estimator(LDA()) >>> lda_estimator.transform(**Xy) {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])} >>> pipe = Pipe(SelectKBest(k=7), lda_estimator) >>> pipe.run(**Xy) {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])} >>> pipe2 = Pipe(lda_estimator, SVC()) >>> pipe2.run(**Xy) {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])} >>> cv = CV(Methods(pipe, SVC()), n_folds=3) >>> cv.run(**Xy) [[{'y/test/pred': array([0, 0, 0, 0, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}, {'y/test/pred': array([0, 0, 0, 1, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}], [{'y/test/pred': array([0, 0, 0, 0, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}, {'y/test/pred': array([1, 0, 0, 1, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}], [{'y/test/pred': array([1, 1, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}, {'y/test/pred': array([0, 0, 1, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}]] >>> cv.reduce() ResultSet( [{'key': SelectKBest/LDA/SVC, 'y/test/score_precision': [ 0.5 0.33333333], 'y/test/score_recall': [ 0.75 0.14285714], 'y/test/score_accuracy': 0.466666666667, 'y/test/score_f1': [ 0.6 0.2], 'y/test/score_recall_mean': 0.446428571429}, {'key': SVC, 'y/test/score_precision': [ 0.7 0.8], 'y/test/score_recall': [ 0.875 0.57142857], 
'y/test/score_accuracy': 0.733333333333, 'y/test/score_f1': [ 0.77777778 0.66666667], 'y/test/score_recall_mean': 0.723214285714}]) >>> cv2 = CV(Methods(pipe2, SVC()), n_folds=3) >>> cv2.run(**Xy) [[{'y/test/pred': array([0, 0, 0, 0, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}, {'y/test/pred': array([0, 0, 0, 1, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}], [{'y/test/pred': array([0, 0, 0, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}, {'y/test/pred': array([1, 0, 0, 1, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}], [{'y/test/pred': array([1, 1, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}, {'y/test/pred': array([0, 0, 1, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}]] >>> cv2.reduce() ResultSet( [{'key': LDA/SVC, 'y/test/score_precision': [ 0.46153846 0. ], 'y/test/score_recall': [ 0.75 0. ], 'y/test/score_accuracy': 0.4, 'y/test/score_f1': [ 0.57142857 0. ], 'y/test/score_recall_mean': 0.375}, {'key': SVC, 'y/test/score_precision': [ 0.7 0.8], 'y/test/score_recall': [ 0.875 0.57142857], 'y/test/score_accuracy': 0.733333333333, 'y/test/score_f1': [ 0.77777778 0.66666667], 'y/test/score_recall_mean': 0.723214285714}]) """ def __init__(self, wrapped_node, in_args_fit=None, in_args_transform=None, in_args_predict=None, out_args_predict=None): is_fit_estimator = False if hasattr(wrapped_node, "fit") and hasattr(wrapped_node, "transform"): is_fit_estimator = True elif hasattr(wrapped_node, "fit") and hasattr(wrapped_node, "predict"): is_fit_estimator = True if not is_fit_estimator: raise ValueError("%s should implement fit and transform or fit " "and predict" % wrapped_node.__class__.__name__) super(Estimator, self).__init__(wrapped_node=wrapped_node) if in_args_fit: self.in_args_fit = in_args_fit else: self.in_args_fit = _func_get_args_names(self.wrapped_node.fit) # Internal Estimator if hasattr(wrapped_node, "transform"): if in_args_transform: self.in_args_transform = in_args_transform else: self.in_args_transform = \ _func_get_args_names(self.wrapped_node.transform) # Leaf Estimator if hasattr(wrapped_node, "predict"): if in_args_predict: self.in_args_predict = in_args_predict else: self.in_args_predict = \ _func_get_args_names(self.wrapped_node.predict) if out_args_predict is None: fit_predict_diff = list(set(self.in_args_fit).difference( self.in_args_predict)) if len(fit_predict_diff) > 0: self.out_args_predict = fit_predict_diff else: self.out_args_predict = self.in_args_predict else: self.out_args_predict = out_args_predict def _wrapped_node_transform(self, **Xy): Xy_out = _as_dict(self.wrapped_node.transform( **_sub_dict(Xy, self.in_args_transform)), keys=self.in_args_transform) return Xy_out def _wrapped_node_predict(self, **Xy): Xy_out = _as_dict(self.wrapped_node.predict( **_sub_dict(Xy, self.in_args_predict)), keys=self.out_args_predict) return Xy_out def transform(self, **Xy): """ Parameter --------- Xy: dictionary parameters for fit and transform """ is_fit_predict = False is_fit_transform = False if (hasattr(self.wrapped_node, "transform") and hasattr(self.wrapped_node, "predict")): if not self.children: # leaf node is_fit_predict = True else: # internal node is_fit_transform = True elif hasattr(self.wrapped_node, "transform"): is_fit_transform = 
True elif hasattr(self.wrapped_node, "predict"): is_fit_predict = True if is_fit_transform: Xy_train, Xy_test = train_test_split(Xy) if Xy_train is not Xy_test: res = self.wrapped_node.fit(**_sub_dict(Xy_train, self.in_args_fit)) Xy_out_tr = self._wrapped_node_transform(**Xy_train) Xy_out_te = self._wrapped_node_transform(**Xy_test) Xy_out = train_test_merge(Xy_out_tr, Xy_out_te) else: res = self.wrapped_node.fit(**_sub_dict(Xy, self.in_args_fit)) Xy_out = self._wrapped_node_transform(**Xy) # update ds with transformed values Xy.update(Xy_out) return Xy elif is_fit_predict: Xy_train, Xy_test = train_test_split(Xy) if Xy_train is not Xy_test: Xy_out = dict() res = self.wrapped_node.fit(**_sub_dict(Xy_train, self.in_args_fit)) Xy_out_tr = self._wrapped_node_predict(**Xy_train) Xy_out_tr = _dict_suffix_keys( Xy_out_tr, suffix=conf.SEP + conf.TRAIN + conf.SEP + conf.PREDICTION) Xy_out.update(Xy_out_tr) # Test predict Xy_out_te = self._wrapped_node_predict(**Xy_test) Xy_out_te = _dict_suffix_keys( Xy_out_te, suffix=conf.SEP + conf.TEST + conf.SEP + conf.PREDICTION) Xy_out.update(Xy_out_te) ## True test Xy_test_true = _sub_dict(Xy_test, self.out_args_predict) Xy_out_true = _dict_suffix_keys( Xy_test_true, suffix=conf.SEP + conf.TEST + conf.SEP + conf.TRUE) Xy_out.update(Xy_out_true) else: res = self.wrapped_node.fit(**_sub_dict(Xy, self.in_args_fit)) Xy_out = self._wrapped_node_predict(**Xy) Xy_out = _dict_suffix_keys( Xy_out, suffix=conf.SEP + conf.PREDICTION) ## True test Xy_true = _sub_dict(Xy, self.out_args_predict) Xy_out_true = _dict_suffix_keys( Xy_true, suffix=conf.SEP + conf.TRUE) Xy_out.update(Xy_out_true) return Xy_out else: raise ValueError("%s should implement either transform or predict" % self.wrapped_node.__class__.__name__) if __name__ == "__main__": import doctest doctest.testmod()
bsd-3-clause
thientu/scikit-learn
sklearn/linear_model/__init__.py
268
3096
""" The :mod:`sklearn.linear_model` module implements generalized linear models. It includes Ridge regression, Bayesian Regression, Lasso and Elastic Net estimators computed with Least Angle Regression and coordinate descent. It also implements Stochastic Gradient Descent related algorithms. """ # See http://scikit-learn.sourceforge.net/modules/sgd.html and # http://scikit-learn.sourceforge.net/modules/linear_model.html for # complete documentation. from .base import LinearRegression from .bayes import BayesianRidge, ARDRegression from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV, LassoLarsIC) from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV, lasso_path, enet_path, MultiTaskLasso, MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLassoCV) from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber from .stochastic_gradient import SGDClassifier, SGDRegressor from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV, ridge_regression) from .logistic import (LogisticRegression, LogisticRegressionCV, logistic_regression_path) from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV) from .passive_aggressive import PassiveAggressiveClassifier from .passive_aggressive import PassiveAggressiveRegressor from .perceptron import Perceptron from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression, lasso_stability_path) from .ransac import RANSACRegressor from .theil_sen import TheilSenRegressor __all__ = ['ARDRegression', 'BayesianRidge', 'ElasticNet', 'ElasticNetCV', 'Hinge', 'Huber', 'Lars', 'LarsCV', 'Lasso', 'LassoCV', 'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'LinearRegression', 'Log', 'LogisticRegression', 'LogisticRegressionCV', 'ModifiedHuber', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV', 'PassiveAggressiveClassifier', 'PassiveAggressiveRegressor', 'Perceptron', 'RandomizedLasso', 'RandomizedLogisticRegression', 'Ridge', 'RidgeCV', 'RidgeClassifier', 'RidgeClassifierCV', 'SGDClassifier', 'SGDRegressor', 'SquaredLoss', 'TheilSenRegressor', 'enet_path', 'lars_path', 'lasso_path', 'lasso_stability_path', 'logistic_regression_path', 'orthogonal_mp', 'orthogonal_mp_gram', 'ridge_regression', 'RANSACRegressor']
bsd-3-clause
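The module docstring in the record above enumerates the estimator families exported by sklearn.linear_model. As a brief orientation, here is a minimal usage sketch of a few of them through the standard fit/predict interface; the data and hyperparameter values are arbitrary, chosen only for illustration.

# Minimal usage sketch of a few estimators exported by sklearn.linear_model;
# hyperparameter values are arbitrary and chosen only for illustration.
import numpy as np
from sklearn.linear_model import Ridge, Lasso, SGDClassifier

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y_reg = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(100)   # regression target
y_clf = (y_reg > 0).astype(int)                         # classification target

ridge = Ridge(alpha=1.0).fit(X, y_reg)   # L2-penalized least squares
lasso = Lasso(alpha=0.1).fit(X, y_reg)   # L1 penalty -> sparse coef_
sgd = SGDClassifier(loss="hinge", random_state=0).fit(X, y_clf)  # SGD-trained linear classifier

print(ridge.coef_.round(2))
print(lasso.coef_.round(2))   # some coefficients driven exactly to zero
print(sgd.score(X, y_clf))    # mean accuracy on the training data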
solashirai/edx-platform
openedx/core/lib/block_structure/tests/test_transformers.py
7
2822
""" Tests for transformers.py """ from mock import MagicMock, patch from nose.plugins.attrib import attr from unittest import TestCase from ..block_structure import BlockStructureModulestoreData from ..exceptions import TransformerException from ..transformers import BlockStructureTransformers from .helpers import ( ChildrenMapTestMixin, MockTransformer, mock_registered_transformers ) @attr('shard_2') class TestBlockStructureTransformers(ChildrenMapTestMixin, TestCase): """ Test class for testing BlockStructureTransformers """ class UnregisteredTransformer(MockTransformer): """ Mock transformer that is not registered. """ pass def setUp(self): super(TestBlockStructureTransformers, self).setUp() self.transformers = BlockStructureTransformers(usage_info=MagicMock()) self.registered_transformers = [MockTransformer] def add_mock_transformer(self): """ Adds the registered transformers to the self.transformers collection. """ with mock_registered_transformers(self.registered_transformers): self.transformers += self.registered_transformers def test_add_registered(self): self.add_mock_transformer() self.assertIn(MockTransformer, self.transformers._transformers) # pylint: disable=protected-access def test_add_unregistered(self): with self.assertRaises(TransformerException): self.transformers += [self.UnregisteredTransformer] self.assertEquals(self.transformers._transformers, []) # pylint: disable=protected-access def test_collect(self): with mock_registered_transformers(self.registered_transformers): with patch( 'openedx.core.lib.block_structure.tests.helpers.MockTransformer.collect' ) as mock_collect_call: self.transformers.collect(block_structure=MagicMock()) self.assertTrue(mock_collect_call.called) def test_transform(self): self.add_mock_transformer() with patch( 'openedx.core.lib.block_structure.tests.helpers.MockTransformer.transform' ) as mock_transform_call: self.transformers.transform(block_structure=MagicMock()) self.assertTrue(mock_transform_call.called) def test_is_collected_outdated(self): block_structure = self.create_block_structure( self.SIMPLE_CHILDREN_MAP, BlockStructureModulestoreData ) with mock_registered_transformers(self.registered_transformers): self.assertTrue(self.transformers.is_collected_outdated(block_structure)) self.transformers.collect(block_structure) self.assertFalse(self.transformers.is_collected_outdated(block_structure))
agpl-3.0
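The test class archived above checks that a BlockStructureTransformers collection only accepts registered transformers and delegates collect/transform to each of them. The snippet below is a simplified, hypothetical stand-in for that registration guard, not the openedx API; class and exception names mirror the tests only for readability.

# Simplified, hypothetical stand-in for the registration guard exercised by
# the tests above; this is not the openedx API, only an illustration.
class TransformerException(Exception):
    pass


class MockTransformer(object):
    pass


class UnregisteredTransformer(MockTransformer):
    pass


REGISTERED = {MockTransformer}      # stand-in for the transformer registry


class Transformers(object):
    def __init__(self):
        self._transformers = []

    def __iadd__(self, transformers):
        # Reject any transformer class that is not in the registry.
        unregistered = [t for t in transformers if t not in REGISTERED]
        if unregistered:
            raise TransformerException(
                "Unregistered transformers: %r" % unregistered)
        self._transformers.extend(transformers)
        return self


collection = Transformers()
collection += [MockTransformer]                 # accepted: registered
try:
    collection += [UnregisteredTransformer]     # rejected
except TransformerException as exc:
    print(exc)
    print(collection._transformers)             # unchanged: [MockTransformer]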
solashirai/edx-platform
openedx/core/lib/block_structure/tests/test_transformer_registry.py
14
1575
""" Tests for transformer_registry.py """ import ddt from nose.plugins.attrib import attr from unittest import TestCase from ..transformer_registry import TransformerRegistry from .helpers import MockTransformer, mock_registered_transformers class TestTransformer1(MockTransformer): """ 1st test instance of the MockTransformer that is registered. """ pass class TestTransformer2(MockTransformer): """ 2nd test instance of the MockTransformer that is registered. """ pass class UnregisteredTestTransformer3(MockTransformer): """ 3rd test instance of the MockTransformer that is not registered. """ pass @attr('shard_2') @ddt.ddt class TransformerRegistryTestCase(TestCase): """ Test cases for TransformerRegistry. """ @ddt.data( # None case ([], []), # 1 registered ([TestTransformer1()], []), # 2 registered ([TestTransformer1(), TestTransformer2()], []), # 1 unregistered ([UnregisteredTestTransformer3()], [UnregisteredTestTransformer3.name()]), # 1 registered and 1 unregistered ([TestTransformer1(), UnregisteredTestTransformer3()], [UnregisteredTestTransformer3.name()]), ) @ddt.unpack def test_find_unregistered(self, transformers, expected_unregistered): with mock_registered_transformers([TestTransformer1, TestTransformer2]): self.assertSetEqual( TransformerRegistry.find_unregistered(transformers), set(expected_unregistered), )
agpl-3.0
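The ddt cases in the record above pin down the find_unregistered contract: given transformer instances, return the set of names of those whose class is not registered. A hypothetical, registry-free sketch of that contract follows; the registry representation and class names are illustrative only.

# Hypothetical sketch of the find_unregistered contract exercised above;
# the registry representation is illustrative, not the real TransformerRegistry.
class MockTransformer(object):
    @classmethod
    def name(cls):
        return cls.__name__


class TestTransformer1(MockTransformer):
    pass


class UnregisteredTestTransformer3(MockTransformer):
    pass


REGISTERED_CLASSES = {TestTransformer1}


def find_unregistered(transformers):
    # Return the names of transformers whose class is not registered.
    return {t.name() for t in transformers
            if type(t) not in REGISTERED_CLASSES}


print(find_unregistered([TestTransformer1()]))             # set()
print(find_unregistered([TestTransformer1(),
                         UnregisteredTestTransformer3()])) # {'UnregisteredTestTransformer3'}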
Akshay0724/scikit-learn
sklearn/feature_selection/tests/test_feature_select.py
43
26651
""" Todo: cross-check the F-value with stats model """ from __future__ import division import itertools import warnings import numpy as np from scipy import stats, sparse from numpy.testing import run_module_suite from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_not_in from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils import safe_mask from sklearn.datasets.samples_generator import (make_classification, make_regression) from sklearn.feature_selection import ( chi2, f_classif, f_oneway, f_regression, mutual_info_classif, mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr, SelectFdr, SelectFwe, GenericUnivariateSelect) ############################################################################## # Test the score functions def test_f_oneway_vs_scipy_stats(): # Test that our f_oneway gives the same result as scipy.stats rng = np.random.RandomState(0) X1 = rng.randn(10, 3) X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) assert_true(np.allclose(f, f2)) assert_true(np.allclose(pv, pv2)) def test_f_oneway_ints(): # Smoke test f_oneway on integers: that it does raise casting errors # with recent numpys rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 10)) y = np.arange(10) fint, pint = f_oneway(X, y) # test that is gives the same result as with float f, p = f_oneway(X.astype(np.float), y) assert_array_almost_equal(f, fint, decimal=4) assert_array_almost_equal(p, pint, decimal=4) def test_f_classif(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression(): # Test whether the F test yields meaningful results # on a simple simulated regression problem X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) F, pv = f_regression(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) # with centering, compare with sparse F, pv = f_regression(X, y, center=True) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) # again without centering, compare with sparse F, pv = f_regression(X, y, center=False) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def 
test_f_regression_input_dtype(): # Test whether f_regression returns the same value # for any numeric data_type rng = np.random.RandomState(0) X = rng.rand(10, 20) y = np.arange(10).astype(np.int) F1, pv1 = f_regression(X, y) F2, pv2 = f_regression(X, y.astype(np.float)) assert_array_almost_equal(F1, F2, 5) assert_array_almost_equal(pv1, pv2, 5) def test_f_regression_center(): # Test whether f_regression preserves dof according to 'center' argument # We use two centered variates so we have a simple relationship between # F-score with variates centering and F-score without variates centering. # Create toy example X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean n_samples = X.size Y = np.ones(n_samples) Y[::2] *= -1. Y[0] = 0. # have Y mean being null F1, _ = f_regression(X, Y, center=True) F2, _ = f_regression(X, Y, center=False) assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2) assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS def test_f_classif_multi_class(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) def test_select_percentile_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_percentile_classif_sparse(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) X = sparse.csr_matrix(X) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r.toarray(), X_r2.toarray()) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) assert_true(sparse.issparse(X_r2inv)) support_mask = safe_mask(X_r2inv, support) assert_equal(X_r2inv.shape, X.shape) assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) # Check other columns are empty assert_equal(X_r2inv.getnnz(), X_r.getnnz()) ############################################################################## # Test univariate selection in classification settings def test_select_kbest_classif(): # Test whether the relative univariate feature selection 
# gets the correct items in a simple classification problem # with the k best heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=5) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_classif, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_kbest_all(): # Test whether k="all" correctly returns all features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k='all') X_r = univariate_filter.fit(X, y).transform(X) assert_array_equal(X, X_r) def test_select_kbest_zero(): # Test whether k=0 correctly returns no features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=0) univariate_filter.fit(X, y) support = univariate_filter.get_support() gtruth = np.zeros(10, dtype=bool) assert_array_equal(support, gtruth) X_selected = assert_warns_message(UserWarning, 'No features were selected', univariate_filter.transform, X) assert_equal(X_selected.shape, (20, 0)) def test_select_heuristics_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the fdr, fwe and fpr heuristics X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_classif, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_classif, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_almost_equal(support, gtruth) ############################################################################## # Test univariate selection in regression settings def assert_best_scores_kept(score_filter): scores = score_filter.scores_ support = score_filter.get_support() assert_array_equal(np.sort(scores[support]), np.sort(scores)[-support.sum():]) def test_select_percentile_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the percentile heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_2 = X.copy() X_2[:, np.logical_not(support)] = 0 assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) # Check inverse_transform respects dtype assert_array_equal(X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))) def test_select_percentile_regression_full(): # Test whether the relative univariate feature selection # selects all 
features when '100%' is asked. X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=100) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=100).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.ones(20) assert_array_equal(support, gtruth) def test_invalid_percentile(): X, y = make_regression(n_samples=10, n_features=20, n_informative=2, shuffle=False, random_state=0) assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y) assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=101).fit, X, y) def test_select_kbest_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the k best heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectKBest(f_regression, k=5) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_heuristics_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fpr, fdr or fwe heuristics X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectFpr(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_regression, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 3) def test_boundary_case_ch2(): # Test boundary case, and always aim to select 1 feature. 
X = np.array([[10, 20], [20, 20], [20, 30]]) y = np.array([[1], [0], [0]]) scores, pvalues = chi2(X, y) assert_array_almost_equal(scores, np.array([4., 0.71428571])) assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472])) filter_fdr = SelectFdr(chi2, alpha=0.1) filter_fdr.fit(X, y) support_fdr = filter_fdr.get_support() assert_array_equal(support_fdr, np.array([True, False])) filter_kbest = SelectKBest(chi2, k=1) filter_kbest.fit(X, y) support_kbest = filter_kbest.get_support() assert_array_equal(support_kbest, np.array([True, False])) filter_percentile = SelectPercentile(chi2, percentile=50) filter_percentile.fit(X, y) support_percentile = filter_percentile.get_support() assert_array_equal(support_percentile, np.array([True, False])) filter_fpr = SelectFpr(chi2, alpha=0.1) filter_fpr.fit(X, y) support_fpr = filter_fpr.get_support() assert_array_equal(support_fpr, np.array([True, False])) filter_fwe = SelectFwe(chi2, alpha=0.1) filter_fwe.fit(X, y) support_fwe = filter_fwe.get_support() assert_array_equal(support_fwe, np.array([True, False])) def test_select_fdr_regression(): # Test that fdr heuristic actually has low FDR. def single_fdr(alpha, n_informative, random_state): X, y = make_regression(n_samples=150, n_features=20, n_informative=n_informative, shuffle=False, random_state=random_state, noise=10) with warnings.catch_warnings(record=True): # Warnings can be raised when no features are selected # (low alpha or very noisy data) univariate_filter = SelectFdr(f_regression, alpha=alpha) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fdr', param=alpha).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() num_false_positives = np.sum(support[n_informative:] == 1) num_true_positives = np.sum(support[:n_informative] == 1) if num_false_positives == 0: return 0. false_discovery_rate = (num_false_positives / (num_true_positives + num_false_positives)) return false_discovery_rate for alpha in [0.001, 0.01, 0.1]: for n_informative in [1, 5, 10]: # As per Benjamini-Hochberg, the expected false discovery rate # should be lower than alpha: # FDR = E(FP / (TP + FP)) <= alpha false_discovery_rate = np.mean([single_fdr(alpha, n_informative, random_state) for random_state in range(100)]) assert_greater_equal(alpha, false_discovery_rate) # Make sure that the empirical false discovery rate increases # with alpha: if false_discovery_rate != 0: assert_greater(false_discovery_rate, alpha / 10) def test_select_fwe_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fwe heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fwe', param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 2) def test_selectkbest_tiebreaking(): # Test whether SelectKBest actually selects k features in case of ties. # Prior to 0.11, SelectKBest would return more features than requested. 
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectKBest(dummy_score, k=1) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectKBest(dummy_score, k=2) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_selectpercentile_tiebreaking(): # Test if SelectPercentile selects the right n_features in case of ties. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectPercentile(dummy_score, percentile=34) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectPercentile(dummy_score, percentile=67) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_tied_pvalues(): # Test whether k-best and percentiles work with tied pvalues from chi2. # chi2 will return the same p-values for the following features, but it # will return different scores. X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) y = [0, 1] for perm in itertools.permutations((0, 1, 2)): X = X0[:, perm] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) def test_scorefunc_multilabel(): # Test whether k-best and percentiles works with multilabels with chi2. X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]]) y = [[1, 1], [0, 1], [1, 0]] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (3, 2)) assert_not_in(0, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (3, 2)) assert_not_in(0, Xt) def test_tied_scores(): # Test for stable sorting in k-best with tied scores. X_train = np.array([[0, 0, 0], [1, 1, 1]]) y_train = [0, 1] for n_features in [1, 2, 3]: sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) X_test = sel.transform([[0, 1, 2]]) assert_array_equal(X_test[0], np.arange(3)[-n_features:]) def test_nans(): # Assert that SelectKBest and SelectPercentile can handle NaNs. # First feature has zero variance to confuse f_classif (ANOVA) and # make it return a NaN. X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for select in (SelectKBest(f_classif, 2), SelectPercentile(f_classif, percentile=67)): ignore_warnings(select.fit)(X, y) assert_array_equal(select.get_support(indices=True), np.array([1, 2])) def test_score_func_error(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe, SelectFdr, SelectFpr, GenericUnivariateSelect]: assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y) def test_invalid_k(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] assert_raises(ValueError, SelectKBest(k=-1).fit, X, y) assert_raises(ValueError, SelectKBest(k=4).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=4).fit, X, y) def test_f_classif_constant_feature(): # Test that f_classif warns if a feature is constant throughout. 
X, y = make_classification(n_samples=10, n_features=5) X[:, 0] = 2.0 assert_warns(UserWarning, f_classif, X, y) def test_no_feature_selected(): rng = np.random.RandomState(0) # Generate random uncorrelated data: a strict univariate test should # rejects all the features X = rng.rand(40, 10) y = rng.randint(0, 4, size=40) strict_selectors = [ SelectFwe(alpha=0.01).fit(X, y), SelectFdr(alpha=0.01).fit(X, y), SelectFpr(alpha=0.01).fit(X, y), SelectPercentile(percentile=0).fit(X, y), SelectKBest(k=0).fit(X, y), ] for selector in strict_selectors: assert_array_equal(selector.get_support(), np.zeros(10)) X_selected = assert_warns_message( UserWarning, 'No features were selected', selector.transform, X) assert_equal(X_selected.shape, (40, 0)) def test_mutual_info_classif(): X, y = make_classification(n_samples=100, n_features=5, n_informative=1, n_redundant=1, n_repeated=0, n_classes=2, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_classif, k=2) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) def test_mutual_info_regression(): X, y = make_regression(n_samples=100, n_features=10, n_informative=2, shuffle=False, random_state=0, noise=10) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_regression, k=2) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile', param=20).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) if __name__ == '__main__': run_module_suite()
bsd-3-clause
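The test module above exercises the univariate selectors (SelectKBest, SelectPercentile, SelectFpr/SelectFdr/SelectFwe, GenericUnivariateSelect) against f_classif, f_regression, chi2 and the mutual-information scores. As a brief orientation, here is a minimal usage sketch of the two most common selectors; the dataset parameters are arbitrary and chosen only for illustration.

# Minimal usage sketch of the univariate selectors exercised by the tests above.
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, SelectPercentile, f_classif

X, y = make_classification(n_samples=200, n_features=20, n_informative=3,
                           n_redundant=2, shuffle=False, random_state=0)

kbest = SelectKBest(f_classif, k=5).fit(X, y)
X_k = kbest.transform(X)                       # keep the 5 highest-scoring features
print(X_k.shape, kbest.get_support(indices=True))

perc = SelectPercentile(f_classif, percentile=25).fit(X, y)
X_p = perc.transform(X)                        # keep the top 25% of features
print(X_p.shape, perc.get_support(indices=True))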
Akshay0724/scikit-learn
sklearn/model_selection/tests/test_split.py
12
47658
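The content that follows archives the test module for the sklearn.model_selection splitters (KFold, StratifiedKFold, ShuffleSplit, GroupKFold, train_test_split and related utilities). As a brief orientation before it, a minimal sketch of the split interface those tests exercise; the data and parameters are arbitrary, chosen only for illustration.

# Minimal sketch of the splitter API exercised by the test module below.
import numpy as np
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split

X = np.arange(20).reshape(10, 2)
y = np.array([0] * 5 + [1] * 5)

for train_idx, test_idx in KFold(n_splits=5).split(X):
    print("KFold  train:", train_idx, "test:", test_idx)

for train_idx, test_idx in StratifiedKFold(n_splits=5).split(X, y):
    # each test fold preserves the 50/50 class ratio of y
    print("SKFold train:", train_idx, "test:", test_idx)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3,
                                          stratify=y, random_state=0)
print(X_tr.shape, X_te.shape)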
"""Test the split module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix, csc_matrix, csr_matrix from scipy import stats from scipy.misc import comb from itertools import combinations from itertools import combinations_with_replacement from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.validation import _num_samples from sklearn.utils.mocking import MockDataFrame from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GroupKFold from sklearn.model_selection import TimeSeriesSplit from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import PredefinedSplit from sklearn.model_selection import check_cv from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.linear_model import Ridge from sklearn.model_selection._split import _validate_shuffle_split from sklearn.model_selection._split import _CVIterableWrapper from sklearn.model_selection._split import _build_repr from sklearn.datasets import load_digits from sklearn.datasets import make_classification from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.svm import SVC X = np.ones(10) y = np.arange(10) // 2 P_sparse = coo_matrix(np.eye(5)) test_groups = ( np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3']) digits = load_digits() class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: 
assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. / (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} @ignore_warnings def test_cross_validator_with_default_params(): n_samples = 4 n_unique_groups = 4 n_splits = 2 p = 2 n_shuffle_splits = 10 # (the default value) X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) X_1d = np.array([1, 2, 3, 4]) y = np.array([1, 1, 2, 2]) groups = np.array([1, 2, 3, 4]) loo = LeaveOneOut() lpo = LeavePOut(p) kf = KFold(n_splits) skf = StratifiedKFold(n_splits) lolo = LeaveOneGroupOut() lopo = LeavePGroupsOut(p) ss = ShuffleSplit(random_state=0) ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2 loo_repr = "LeaveOneOut()" lpo_repr = "LeavePOut(p=2)" kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)" skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)" lolo_repr = "LeaveOneGroupOut()" lopo_repr = "LeavePGroupsOut(n_groups=2)" ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, " "train_size=None)") ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))" n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits, n_unique_groups, comb(n_unique_groups, p), n_shuffle_splits, 2] for i, (cv, cv_repr) in enumerate(zip( [loo, lpo, kf, skf, lolo, lopo, ss, ps], [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr, ss_repr, ps_repr])): # Test if get_n_splits works correctly assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups)) # Test if the cross-validator works as expected even if # the data is 1d np.testing.assert_equal(list(cv.split(X, y, groups)), list(cv.split(X_1d, y, groups))) # Test that train, test indices returned are integers for train, test in cv.split(X, y, groups): assert_equal(np.asarray(train).dtype.kind, 'i') assert_equal(np.asarray(train).dtype.kind, 'i') # Test if the repr works without any errors assert_equal(cv_repr, repr(cv)) def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, X, y, groups, expected_n_splits=None): n_samples = _num_samples(X) # Check that a all the samples appear at least once in a test fold if expected_n_splits is not None: 
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits) else: expected_n_splits = cv.get_n_splits(X, y, groups) collected_test_samples = set() iterations = 0 for train, test in cv.split(X, y, groups): check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_splits) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): X1 = np.array([[1, 2], [3, 4], [5, 6]]) X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, next, KFold(4).split(X1)) # Check that a warning is raised if the least populated class has too few # members. y = np.array([3, 3, -1, -1, 3]) skf_3 = StratifiedKFold(3) assert_warns_message(Warning, "The least populated class", next, skf_3.split(X2, y)) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split with warnings.catch_warnings(): warnings.simplefilter("ignore") check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3) # Check that errors are raised if all n_groups for individual # classes are less than n_splits. y = np.array([3, 3, -1, -1, 2]) assert_raises(ValueError, next, skf_3.split(X2, y)) # Error when number of folds is <= 1 assert_raises(ValueError, KFold, 0) assert_raises(ValueError, KFold, 1) error_string = ("k-fold cross-validation requires at least one" " train/test split") assert_raise_message(ValueError, error_string, StratifiedKFold, 0) assert_raise_message(ValueError, error_string, StratifiedKFold, 1) # When n_splits is not integer: assert_raises(ValueError, KFold, 1.5) assert_raises(ValueError, KFold, 2.0) assert_raises(ValueError, StratifiedKFold, 1.5) assert_raises(ValueError, StratifiedKFold, 2.0) # When shuffle is not a bool: assert_raises(TypeError, KFold, n_splits=4, shuffle=None) def test_kfold_indices(): # Check all indices are returned in the test folds X1 = np.ones(18) kf = KFold(3) check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3) # Check all indices are returned in the test folds even when equal-sized # folds are not possible X2 = np.ones(17) kf = KFold(3) check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3) # Check if get_n_splits returns the number of folds assert_equal(5, KFold(5).get_n_splits(X2)) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] splits = KFold(2).split(X2[:-1]) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = KFold(2).split(X2) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible X, y = np.ones(4), [1, 1, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) 
assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0] splits = StratifiedKFold(2).split(X, y) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) # Check if get_n_splits returns the number of folds assert_equal(5, StratifiedKFold(5).get_n_splits(X, y)) # Make sure string labels are also supported X = np.ones(7) y1 = ['1', '1', '1', '0', '0', '0', '0'] y2 = [1, 1, 1, 0, 0, 0, 0] np.testing.assert_equal( list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2))) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves class ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 X = np.ones(n_samples) y = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in (False, True): for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y): assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for i in range(11, 17): kf = KFold(5).split(X=np.ones(i)) sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), i) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on X = np.ones(17) y = [0] * 3 + [1] * 14 for shuffle in (True, False): cv = StratifiedKFold(3, shuffle=shuffle) for i in range(11, 17): skf = cv.split(X[:i], y[:i]) sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), i) def test_shuffle_kfold(): # Check the indices are shuffled properly kf = KFold(3) kf2 = KFold(3, shuffle=True, random_state=0) kf3 = KFold(3, shuffle=True, random_state=1) X = np.ones(300) all_folds = np.zeros(300) for (tr1, te1), (tr2, te2), (tr3, te3) in zip( kf.split(X), kf2.split(X), kf3.split(X)): for tr_a, tr_b in combinations((tr1, tr2, tr3), 2): # Assert that there is no complete overlap assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1)) # Set all test indices in successive iterations of kf2 to 1 all_folds[te2] = 1 # Check that all indices are returned in the different test folds assert_equal(sum(all_folds), 300) def test_shuffle_kfold_stratifiedkfold_reproducibility(): # Check that when the shuffle is True multiple split calls produce the # same split when random_state is set X = np.ones(15) # Divisible by 3 y = [0] * 7 + [1] * 8 X2 = np.ones(16) # Not divisible by 3 y2 = [0] * 8 + [1] * 8 kf = KFold(3, shuffle=True, random_state=0) skf = StratifiedKFold(3, shuffle=True, random_state=0) for cv in (kf, skf): np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y))) np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2))) kf = KFold(3, shuffle=True) skf = StratifiedKFold(3, shuffle=True) for cv in (kf, skf): for data in zip((X, X2), (y, y2)): try: 
np.testing.assert_equal(list(cv.split(*data)), list(cv.split(*data))) except AssertionError: pass else: raise AssertionError("The splits for data, %s, are same even " "when random state is not set" % data) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage X_40 = np.ones(40) y = [0] * 20 + [1] * 20 kf0 = StratifiedKFold(5, shuffle=True, random_state=0) kf1 = StratifiedKFold(5, shuffle=True, random_state=1) for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)): assert_not_equal(set(test0), set(test1)) check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. We can highlight this fact by computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.93) than that the non # shuffling variant (around 0.81). X, y = digits.data[:600], digits.target[:600] model = SVC(C=10, gamma=0.005) n_splits = 3 cv = KFold(n_splits=n_splits, shuffle=False) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.92, mean_score) assert_greater(mean_score, 0.80) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = KFold(n_splits, shuffle=True, random_state=0) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.92) cv = KFold(n_splits, shuffle=True, random_state=1) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.92) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. 
As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = StratifiedKFold(n_splits) mean_score = cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.93, mean_score) assert_greater(mean_score, 0.80) def test_shuffle_split(): ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X) ss2 = ShuffleSplit(test_size=2, random_state=0).split(X) ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X) for typ in six.integer_types: ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): X = np.arange(7) y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, next, StratifiedShuffleSplit(3, 0.2).split(X, y)) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y)) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, next, StratifiedShuffleSplit(3, 3, 2).split(X, y)) X = np.arange(9) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6) assert_raises(ValueError, next, StratifiedShuffleSplit(3, 8, 0.6).split(X, y)) assert_raises(ValueError, next, StratifiedShuffleSplit(3, 0.6, 8).split(X, y)) # Train size or test size too small assert_raises(ValueError, next, StratifiedShuffleSplit(train_size=2).split(X, y)) assert_raises(ValueError, next, StratifiedShuffleSplit(test_size=2).split(X, y)) def test_stratified_shuffle_split_respects_test_size(): y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]) test_size = 5 train_size = 10 sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size, random_state=0).split(np.ones(len(y)), y) for train, test in sss: assert_equal(len(train), train_size) assert_equal(len(test), test_size) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50), np.concatenate([[i] * (100 + i) for i in range(11)]), [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3], ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'], ] for y in ys: sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split(np.ones(len(y)), y) y = np.asanyarray(y) # To make it indexable for y[train] # this is how test-size is computed internally # in _validate_shuffle_split test_size = np.ceil(0.33 * len(y)) train_size = len(y) - test_size for train, test in sss: assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(len(train) + len(test), y.size) assert_equal(len(train), train_size) assert_equal(len(test), test_size) 
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_splits = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: prob = bf.pmf(count) assert_true(prob > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): groups = np.array((n_samples // 2) * [0, 1]) splits = StratifiedShuffleSplit(n_splits=n_splits, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits_actual = 0 for train, test in splits.split(X=np.ones(n_samples), y=groups): n_splits_actual += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits_actual, n_splits) n_train, n_test = _validate_shuffle_split( n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds)) assert_equal(len(train), n_train) assert_equal(len(test), n_test) assert_equal(len(set(train).intersection(test)), 0) group_counts = np.unique(groups) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(n_train + n_test, len(groups)) assert_equal(len(group_counts), 2) ex_test_p = float(n_test) / n_samples ex_train_p = float(n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_stratified_shuffle_split_overlap_train_test_bug(): # See https://github.com/scikit-learn/scikit-learn/issues/6121 for # the original bug report y = [0, 1, 2, 3] * 3 + [4, 5] * 5 X = np.ones_like(y) sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0) train, test = next(iter(sss.split(X=X, y=y))) assert_array_equal(np.intersect1d(train, test), []) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. 
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = PredefinedSplit(folds) # n_splits is simply the no of unique folds assert_equal(len(np.unique(folds)), ps.get_n_splits()) for train_ind, test_ind in ps.split(): ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_group_shuffle_split(): for groups_i in test_groups: X = y = np.ones(len(groups_i)) n_splits = 6 test_size = 1./3 slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits) l_unique = np.unique(groups_i) l = np.asarray(groups_i) for train, test in slo.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa l_train_unique = np.unique(l[train]) l_test_unique = np.unique(l[test]) assert_false(np.any(np.in1d(l[train], l_test_unique))) assert_false(np.any(np.in1d(l[test], l_train_unique))) # Second test: train and test add up to all the data assert_equal(l[train].size + l[test].size, l.size) # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test groups are correct, +- 1 for rounding error assert_true(abs(len(l_test_unique) - round(test_size * len(l_unique))) <= 1) assert_true(abs(len(l_train_unique) - round((1.0 - test_size) * len(l_unique))) <= 1) def test_leave_one_p_group_out(): logo = LeaveOneGroupOut() lpgo_1 = LeavePGroupsOut(n_groups=1) lpgo_2 = LeavePGroupsOut(n_groups=2) # Make sure the repr works assert_equal(repr(logo), 'LeaveOneGroupOut()') assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)') assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)') assert_equal(repr(LeavePGroupsOut(n_groups=3)), 'LeavePGroupsOut(n_groups=3)') for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1), (lpgo_2, 2))): for i, groups_i in enumerate(test_groups): n_groups = len(np.unique(groups_i)) n_splits = (n_groups if p_groups_out == 1 else n_groups * (n_groups - 1) / 2) X = y = np.ones(len(groups_i)) # Test that the length is correct assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits) groups_arr = np.asarray(groups_i) # Split using the original list / array / list of string groups_i for train, test in cv.split(X, y, groups=groups_i): # First test: no train group is in the test set and vice versa assert_array_equal(np.intersect1d(groups_arr[train], groups_arr[test]).tolist(), []) # Second test: train and test add up to all the data assert_equal(len(train) + len(test), len(groups_i)) # Third test: # The number of groups in test must be equal to p_groups_out assert_true(np.unique(groups_arr[test]).shape[0], p_groups_out) def test_leave_group_out_changing_groups(): # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if # the groups variable is changed before calling split groups = np.array([0, 1, 2, 1, 1, 2, 0, 0]) X = np.ones(len(groups)) groups_changing = np.array(groups, copy=True) lolo = LeaveOneGroupOut().split(X, groups=groups) lolo_changing = LeaveOneGroupOut().split(X, groups=groups) lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups) lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups) groups_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, 
lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3 assert_equal( 3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X, groups=groups)) # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups) assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X, groups=groups)) def test_leave_one_p_group_out_error_on_fewer_number_of_groups(): X = y = groups = np.ones(0) assert_raise_message(ValueError, "Found array with 0 sample(s)", next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than 2 unique groups ([ 1.]). " "LeaveOneGroupOut expects at least 2.") assert_raise_message(ValueError, msg, next, LeaveOneGroupOut().split(X, y, groups)) X = y = groups = np.ones(1) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ([ 1.]). LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups be present") assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) X = y = groups = np.arange(3) msg = ("The groups parameter contains fewer than (or equal to) n_groups " "(3) numbers of unique groups ([0 1 2]). LeavePGroupsOut expects " "that at least n_groups + 1 (4) unique groups be present") assert_raise_message(ValueError, msg, next, LeavePGroupsOut(n_groups=3).split(X, y, groups)) def test_train_test_split_errors(): assert_raises(ValueError, train_test_split) assert_raises(ValueError, train_test_split, range(3), train_size=1.1) assert_raises(ValueError, train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # don't convert lists to anything else by default split = train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) 
@ignore_warnings def train_test_split_pandas(): # check train_test_split doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_sparse(): # check that train_test_split converts scipy sparse matrices # to csr, as stated in the documentation X = np.arange(100).reshape((10, 10)) sparse_types = [csr_matrix, csc_matrix, coo_matrix] for InputFeatureType in sparse_types: X_s = InputFeatureType(X) X_train, X_test = train_test_split(X_s) assert_true(isinstance(X_train, csr_matrix)) assert_true(isinstance(X_test, csr_matrix)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) X_train_arr, X_test_arr = train_test_split(X_df) def train_test_split_list_input(): # Check that when y is a list / list of string labels, it works. X = np.ones(7) y1 = ['1'] * 4 + ['0'] * 3 y2 = np.hstack((np.ones(4), np.zeros(3))) y3 = y2.tolist() for stratify in (True, False): X_train1, X_test1, y_train1, y_test1 = train_test_split( X, y1, stratify=y1 if stratify else None, random_state=0) X_train2, X_test2, y_train2, y_test2 = train_test_split( X, y2, stratify=y2 if stratify else None, random_state=0) X_train3, X_test3, y_train3, y_test3 = train_test_split( X, y3, stratify=y3 if stratify else None, random_state=0) np.testing.assert_equal(X_train1, X_train2) np.testing.assert_equal(y_train2, y_train3) np.testing.assert_equal(X_test1, X_test3) np.testing.assert_equal(y_test3, y_test2) def test_shufflesplit_errors(): # When the {test|train}_size is a float/invalid, error is raised at init assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None) assert_raises(ValueError, ShuffleSplit, test_size=2.0) assert_raises(ValueError, ShuffleSplit, test_size=1.0) assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95) assert_raises(ValueError, ShuffleSplit, train_size=1j) # When the {test|train}_size is an int, validation is based on the input X # and happens at split(...) assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X)) assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X)) assert_raises(ValueError, next, ShuffleSplit(test_size=8, train_size=3).split(X)) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = ShuffleSplit(random_state=21) assert_array_equal(list(a for a, b in ss.split(X)), list(a for a, b in ss.split(X))) def test_stratifiedshufflesplit_list_input(): # Check that when y is a list / list of string labels, it works. 
sss = StratifiedShuffleSplit(test_size=2, random_state=42) X = np.ones(7) y1 = ['1'] * 4 + ['0'] * 3 y2 = np.hstack((np.ones(4), np.zeros(3))) y3 = y2.tolist() np.testing.assert_equal(list(sss.split(X, y1)), list(sss.split(X, y2))) np.testing.assert_equal(list(sss.split(X, y3)), list(sss.split(X, y2))) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) train_test_split(X, y, test_size=0.2, random_state=42) def test_check_cv(): X = np.ones(9) cv = check_cv(3, classifier=False) # Use numpy.testing.assert_equal which recursively compares # lists of lists np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = check_cv(3, y_binary, classifier=True) np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)), list(cv.split(X, y_binary))) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = check_cv(3, y_multiclass, classifier=True) np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)), list(cv.split(X, y_multiclass))) X = np.ones(5) y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 1], [0, 0, 1, 0]]) cv = check_cv(3, y_multilabel, classifier=True) np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = check_cv(3, y_multioutput, classifier=True) np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X))) # Check if the old style classes are wrapped to have a split method X = np.ones(9) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv1 = check_cv(3, y_multiclass, classifier=True) with warnings.catch_warnings(record=True): from sklearn.cross_validation import StratifiedKFold as OldSKF cv2 = check_cv(OldSKF(y_multiclass, n_folds=3)) np.testing.assert_equal(list(cv1.split(X, y_multiclass)), list(cv2.split())) assert_raises(ValueError, check_cv, cv="lolo") def test_cv_iterable_wrapper(): y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) with warnings.catch_warnings(record=True): from sklearn.cross_validation import StratifiedKFold as OldSKF cv = OldSKF(y_multiclass, n_folds=3) wrapped_old_skf = _CVIterableWrapper(cv) # Check if split works correctly np.testing.assert_equal(list(cv), list(wrapped_old_skf.split())) # Check if get_n_splits works correctly assert_equal(len(cv), wrapped_old_skf.get_n_splits()) kf_iter = KFold(n_splits=5).split(X, y) kf_iter_wrapped = check_cv(kf_iter) # Since the wrapped iterable is enlisted and stored, # split can be called any number of times to produce # consistent results. 
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)), list(kf_iter_wrapped.split(X, y))) # If the splits are randomized, successive calls to split yields different # results kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y) kf_randomized_iter_wrapped = check_cv(kf_randomized_iter) np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)), list(kf_randomized_iter_wrapped.split(X, y))) try: np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)), list(kf_randomized_iter_wrapped.split(X, y))) splits_are_equal = True except AssertionError: splits_are_equal = False assert_false(splits_are_equal, "If the splits are randomized, " "successive calls to split should yield different results") def test_group_kfold(): rng = np.random.RandomState(0) # Parameters of the test n_groups = 15 n_samples = 1000 n_splits = 5 X = y = np.ones(n_samples) # Construct the test data tolerance = 0.05 * n_samples # 5 percent error allowed groups = rng.randint(0, n_groups, n_samples) ideal_n_groups_per_fold = n_samples // n_splits len(np.unique(groups)) # Get the test fold indices from the test set indices of each fold folds = np.zeros(n_samples) lkf = GroupKFold(n_splits=n_splits) for i, (_, test) in enumerate(lkf.split(X, y, groups)): folds[test] = i # Check that folds have approximately the same size assert_equal(len(folds), len(groups)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_groups_per_fold)) # Check that each group appears only in 1 fold for group in np.unique(groups): assert_equal(len(np.unique(folds[groups == group])), 1) # Check that no group is on both sides of the split groups = np.asarray(groups, dtype=object) for train, test in lkf.split(X, y, groups): assert_equal(len(np.intersect1d(groups[train], groups[test])), 0) # Construct the test data groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean', 'Francis', 'Robert', 'Michel', 'Rachel', 'Lois', 'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean', 'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix', 'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky', 'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']) n_groups = len(np.unique(groups)) n_samples = len(groups) n_splits = 5 tolerance = 0.05 * n_samples # 5 percent error allowed ideal_n_groups_per_fold = n_samples // n_splits X = y = np.ones(n_samples) # Get the test fold indices from the test set indices of each fold folds = np.zeros(n_samples) for i, (_, test) in enumerate(lkf.split(X, y, groups)): folds[test] = i # Check that folds have approximately the same size assert_equal(len(folds), len(groups)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_groups_per_fold)) # Check that each group appears only in 1 fold with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) for group in np.unique(groups): assert_equal(len(np.unique(folds[groups == group])), 1) # Check that no group is on both sides of the split groups = np.asarray(groups, dtype=object) for train, test in lkf.split(X, y, groups): assert_equal(len(np.intersect1d(groups[train], groups[test])), 0) # groups can also be a list cv_iter = list(lkf.split(X, y, groups.tolist())) for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups), cv_iter): assert_array_equal(train1, train2) assert_array_equal(test1, test2) # Should fail if there are more folds than groups groups = np.array([1, 1, 1, 2, 2]) X = y = np.ones(len(groups)) 
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater", next, GroupKFold(n_splits=3).split(X, y, groups)) def test_time_series_cv(): X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]] # Should fail if there are more folds than samples assert_raises_regexp(ValueError, "Cannot have number of folds.*greater", next, TimeSeriesSplit(n_splits=7).split(X)) tscv = TimeSeriesSplit(2) # Manually check that Time Series CV preserves the data # ordering on toy datasets splits = tscv.split(X[:-1]) train, test = next(splits) assert_array_equal(train, [0, 1]) assert_array_equal(test, [2, 3]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3]) assert_array_equal(test, [4, 5]) splits = TimeSeriesSplit(2).split(X) train, test = next(splits) assert_array_equal(train, [0, 1, 2]) assert_array_equal(test, [3, 4]) train, test = next(splits) assert_array_equal(train, [0, 1, 2, 3, 4]) assert_array_equal(test, [5, 6]) # Check get_n_splits returns the correct number of splits splits = TimeSeriesSplit(2).split(X) n_splits_actual = len(list(splits)) assert_equal(n_splits_actual, tscv.get_n_splits()) assert_equal(n_splits_actual, 2) def test_nested_cv(): # Test if nested cross validation works with different combinations of cv rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 5, 15) cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(), StratifiedShuffleSplit(n_splits=3, random_state=0)] for inner_cv, outer_cv in combinations_with_replacement(cvs, 2): gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]}, cv=inner_cv) cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv, fit_params={'groups': groups}) def test_build_repr(): class MockSplitter: def __init__(self, a, b=0, c=None): self.a = a self.b = b self.c = c def __repr__(self): return _build_repr(self) assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
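# A short sketch of the ordering guarantee exercised in test_time_series_cv
# above, assuming the public sklearn.model_selection.TimeSeriesSplit API:
# training indices only grow forward in time, so later samples never end up
# in the training side of an earlier fold.
import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X_demo = np.arange(14).reshape(7, 2)
for train_idx, test_idx in TimeSeriesSplit(n_splits=2).split(X_demo):
    print(train_idx, test_idx)
# expected, per the assertions above: [0 1 2] [3 4], then [0 1 2 3 4] [5 6]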
bsd-3-clause
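# A companion sketch for test_group_kfold above, assuming the public
# sklearn.model_selection.GroupKFold API: each group lands in exactly one
# test fold, so no group ever appears on both sides of a split.
import numpy as np
from sklearn.model_selection import GroupKFold

X_demo = y_demo = np.ones(6)
groups_demo = np.array([1, 1, 2, 2, 3, 3])
for train_idx, test_idx in GroupKFold(n_splits=3).split(X_demo, y_demo,
                                                        groups_demo):
    assert len(np.intersect1d(groups_demo[train_idx],
                              groups_demo[test_idx])) == 0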
ales-erjavec/orange
Orange/OrangeWidgets/Regression/OWRegressionTreeViewer2D.py
6
13173
""" <name> Regression Tree Graph</name> <description>Regression tree viewer (graph view).</description> <icon>icons/RegressionTreeGraph.svg</icon> <contact>Ales Erjavec (ales.erjavec(@at@)fri.uni-lj.si)</contact> <priority>2110</priority> """ from OWTreeViewer2D import * import re import Orange class RegressionTreeNode(GraphicsNode): def __init__(self, attr, tree, parent=None, *args): GraphicsNode.__init__(self, tree, parent, *args) self.attr = attr fm = QFontMetrics(self.document().defaultFont()) self.attr_text_w = fm.width(str(self.attr if self.attr else "")) self.attr_text_h = fm.lineSpacing() self.line_descent = fm.descent() def rule(self): return self.parent.rule() + [(self.parent.tree.branchSelector.classVar, self.attr)] if self.parent else [] def rect(self): rect = GraphicsNode.rect(self) rect.setRight(max(rect.right(), getattr(self, "attr_text_w", 0))) return rect def boundingRect(self): if hasattr(self, "attr"): attr_rect = QRectF(QPointF(0, -self.attr_text_h), QSizeF(self.attr_text_w, self.attr_text_h)) else: attr_rect = QRectF(0, 0, 1, 1) rect = self.rect().adjusted(-5, -5, 5, 5) return rect | GraphicsNode.boundingRect(self) | attr_rect def paint(self, painter, option, widget=None): if self.isSelected(): option.state = option.state.__xor__(QStyle.State_Selected) if self.isSelected(): painter.save() painter.setBrush(QBrush(QColor(125, 162, 206, 192))) painter.drawRoundedRect(self.boundingRect().adjusted(-2, 1, -1, -1), 10, 10)#self.borderRadius, self.borderRadius) painter.restore() painter.setFont(self.document().defaultFont()) painter.drawText(QPointF(0, -self.line_descent), str(self.attr) if self.attr else "") painter.save() painter.setBrush(self.backgroundBrush) rect = self.rect() painter.drawRoundedRect(rect.adjusted(-3, 0, 0, 0), 10, 10)#, self.borderRadius, self.borderRadius) painter.restore() painter.setClipRect(rect | QRectF(QPointF(0, 0), self.document().size())) return QGraphicsTextItem.paint(self, painter, option, widget) def parseRules(rules): def joinCont(rule1, rule2): int1, int2=["(",-1e1000,1e1000,")"], ["(",-1e1000,1e1000,")"] rule=[rule1, rule2] interval=[int1, int2] for i in [0,1]: if rule[i][1].startswith("in"): r=rule[i][1][2:] interval[i]=[r.strip(" ")[0]]+map(lambda a: float(a), r.strip("()[] ").split(","))+[r.strip(" ")[-1]] else: if "<" in rule[i][1]: interval[i][3]=("=" in rule[i][1] and "]") or ")" interval[i][2]=float(rule[i][1].strip("<>= ")) else: interval[i][0]=("=" in rule[i][1] and "[") or "(" interval[i][1]=float(rule[i][1].strip("<>= ")) inter=[None]*4 if interval[0][1]<interval[1][1] or (interval[0][1]==interval[1][1] and interval[0][0]=="["): interval.reverse() inter[:2]=interval[0][:2] if interval[0][2]>interval[1][2] or (interval[0][2]==interval[1][2] and interval[0][3]=="]"): interval.reverse() inter[2:]=interval[0][2:] if 1e1000 in inter or -1e1000 in inter: rule=((-1e1000==inter[1] and "<") or ">") rule+=(("[" in inter or "]" in inter) and "=") or "" rule+=(-1e1000==inter[1] and str(inter[2])) or str(inter[1]) else: rule="in "+inter[0]+str(inter[1])+","+str(inter[2])+inter[3] return (rule1[0], rule) def joinDisc(rule1, rule2): r1,r2=rule1[1],rule2[1] r1=re.sub("^in ","",r1) r2=re.sub("^in ","",r2) r1=r1.strip("[]=") r2=r2.strip("[]=") s1=set([s.strip(" ") for s in r1.split(",")]) s2=set([s.strip(" ") for s in r2.split(",")]) s=s1 & s2 if len(s)==1: return (rule1[0], "= "+str(list(s)[0])) else: return (rule1[0], "in ["+",".join([str(st) for st in s])+"]") rules.sort(lambda a,b: (a[0].name<b[0].name and -1) or 1 ) newRules=[rules[0]] for r 
in rules[1:]: if r[0].name==newRules[-1][0].name: if re.search("(a-zA-Z\"')+",r[1].lstrip("in")): newRules[-1]=joinDisc(r,newRules[-1]) else: newRules[-1]=joinCont(r,newRules[-1]) else: newRules.append(r) return newRules BodyColor_Default = QColor(255, 225, 10) #BodyColor_Default = QColor(Qt.gray) BodyCasesColor_Default = QColor(0, 0, 128) class OWRegressionTreeViewer2D(OWTreeViewer2D): nodeColorOpts = ['Default', 'Instances in node', 'Variance', 'Deviation', 'Error'] nodeInfoButtons = ['Predicted value', 'Variance', 'Deviation', 'Error', 'Number of instances'] def __init__(self, parent=None, signalManager = None, name='RegressionTreeViewer2D'): OWTreeViewer2D.__init__(self, parent, signalManager, name) self.inputs = [("Classification Tree", Orange.regression.tree.TreeClassifier, self.ctree)] self.outputs = [("Data", ExampleTable)] self.NodeColorMethod = 1 self.showNodeInfoText = False self.scene = TreeGraphicsScene(self) self.sceneView = TreeGraphicsView(self, self.scene) self.sceneView.setViewportUpdateMode(QGraphicsView.FullViewportUpdate) self.mainArea.layout().addWidget(self.sceneView) self.toggleZoomSlider() self.connect(self.scene, SIGNAL("selectionChanged()"), self.updateSelection) self.navWidget = OWBaseWidget(self) self.navWidget.lay=QVBoxLayout(self.navWidget) # scene = TreeGraphicsScene(self.navWidget) self.treeNav = TreeNavigator(self.sceneView) #,self,scene,self.navWidget) # self.treeNav.setScene(scene) self.navWidget.layout().addWidget(self.treeNav) self.navWidget.resize(400,400) self.navWidget.setWindowTitle("Navigator") self.setMouseTracking(True) OWGUI.comboBox(self.NodeTab, self, 'NodeColorMethod', items=self.nodeColorOpts, box='Node Color', callback=self.toggleNodeColor, addSpace=True) nodeInfoBox = OWGUI.widgetBox(self.NodeTab, "Show Info On") nodeInfoSettings = ['maj', 'majp', 'tarp', 'error', 'inst'] self.NodeInfoW = []; self.dummy = 0 for i in range(len(self.nodeInfoButtons)): setattr(self, nodeInfoSettings[i], i in self.NodeInfo) w = OWGUI.checkBox(nodeInfoBox, self, nodeInfoSettings[i], \ self.nodeInfoButtons[i], callback=self.setNodeInfo, getwidget=1, id=i) self.NodeInfoW.append(w) OWGUI.rubber(self.NodeTab) # OWGUI.button(self.controlArea, self, "Save As", callback=self.saveGraph, debuggingEnabled = 0) self.NodeInfoSorted=list(self.NodeInfo) self.NodeInfoSorted.sort() def sendReport(self): self.reportSettings("Information", [("Node color", self.nodeColorOpts[self.NodeColorMethod]), ("Data in nodes", ", ".join(s for i, s in enumerate(self.nodeInfoButtons) if self.NodeInfoW[i].isChecked())), ("Line widths", ["Constant", "Proportion of all instances", "Proportion of parent's instances"][self.LineWidthMethod]), ("Tree size", "%i nodes, %i leaves" % (orngTree.countNodes(self.tree), orngTree.countLeaves(self.tree)))]) OWTreeViewer2D.sendReport(self) def setNodeInfo(self, widget=None, id=None): flags = sum(2**i for i, name in enumerate(['maj', 'majp', 'tarp', 'error', 'inst']) if getattr(self, name)) for n in self.scene.nodes(): n.setRect(QRectF()) self.updateNodeInfo(n, flags) if True: w = min(max([n.rect().width() for n in self.scene.nodes()] + [0]), self.MaxNodeWidth if self.LimitNodeWidth else sys.maxint) for n in self.scene.nodes(): n.setRect(QRectF(n.rect().x(), n.rect().y(), w, n.rect().height())) self.scene.fixPos(self.rootNode, 10, 10) self.scene.update() def updateNodeInfo(self, node, flags=63): fix = lambda str: str.replace(">", "&gt;").replace("<", "&lt;") text = "" # if node.attr: # text += "%s<hr width=20000>" % fix(node.attr) lines = [] if flags & 1: 
start = "Predicted value: " if self.showNodeInfoText else "" lines += [start + fix(str(node.tree.nodeClassifier.defaultValue))] if flags & 2: start = "Variance: " if self.showNodeInfoText else "" lines += [start + "%.1f" % node.tree.distribution.var()] if flags & 4: start = "Deviance: " if self.showNodeInfoText else "" lines += [start + "%.1f" % node.tree.distribution.dev()] if flags & 8: start = "Error: " if self.showNodeInfoText else "" lines += [start + "%.1f" % node.tree.distribution.error()] if flags & 16: start = "Number of instances: " if self.showNodeInfoText else "" lines += [start + "%i" % node.tree.distribution.cases] text += "<br>".join(lines) if node.tree.branchSelector: text += "<hr>%s" % (fix(node.tree.branchSelector.classVar.name)) else: text += "<hr>%s" % (fix(str(node.tree.nodeClassifier.defaultValue))) node.setHtml(text) def activateLoadedSettings(self): if not self.tree: return OWTreeViewer2D.activateLoadedSettings(self) self.setNodeInfo() self.toggleNodeColor() def toggleNodeSize(self): self.setNodeInfo() self.scene.update() self.sceneView.repaint() def toggleNodeColor(self): for node in self.scene.nodes(): numInst=self.tree.distribution.cases if self.NodeColorMethod == 0: # default color = BodyColor_Default elif self.NodeColorMethod == 1: # instances in node light = 400 - 300*node.tree.distribution.cases/numInst color = BodyCasesColor_Default.light(light) elif self.NodeColorMethod == 2: light = 300-min([node.tree.distribution.var(),100]) color = BodyCasesColor_Default.light(light) elif self.NodeColorMethod == 3: light = 300 - min([node.tree.distribution.dev(),100]) color = BodyCasesColor_Default.light(light) elif self.NodeColorMethod == 4: light = 400 - 300*node.tree.distribution.error() color = BodyCasesColor_Default.light(light) # gradient = QLinearGradient(0, 0, 0, 100) # gradient.setStops([(0, color.lighter(120)), (1, color.lighter())]) # node.backgroundBrush = QBrush(gradient) node.backgroundBrush = QBrush(color) self.scene.update() # self.treeNav.leech() def ctree(self, tree=None): self.send("Data", None) OWTreeViewer2D.ctree(self, tree) def walkcreate(self, tree, parent=None, level=0, attrVal=""): node=RegressionTreeNode(attrVal, tree, parent, None, self.scene) if parent: parent.graph_add_edge(GraphicsEdge(None, self.scene, node1=parent, node2=node)) if tree.branches: for i in range(len(tree.branches)): if tree.branches[i]: self.walkcreate(tree.branches[i],node,level+1,tree.branchDescriptions[i]) return node def nodeToolTip(self, node): rule=list(node.rule()) fix = lambda str: str.replace(">", "&gt;").replace("<", "&lt;") if rule: try: rule=parseRules(list(rule)) except: pass text="<b>IF</b> "+" <b>AND</b><br>\n ".join([fix(a[0].name+" "+a[1]) for a in rule])+"\n<br><b>THEN</b> "+fix(str(node.tree.nodeClassifier.defaultValue)) else: text="<b>THEN</b> "+fix(str(node.tree.nodeClassifier.defaultValue)) text += "<hr>Instances: %i (%.1f%%)" % (node.tree.distribution.cases, node.tree.distribution.cases/self.tree.distribution.cases*100) text += "<hr>Partition on %s<hr>" % node.tree.branchSelector.classVar.name if node.tree.branchSelector else "<hr>" text += fix(node.tree.nodeClassifier.classVar.name + " = " + str(node.tree.nodeClassifier.defaultValue)) return text if __name__=="__main__": a = QApplication(sys.argv) ow = OWRegressionTreeViewer2D() data = orange.ExampleTable('../../doc/datasets/housing.tab') tree = orange.TreeLearner(data, storeExamples = 1) ow.ctree(tree) # here you can test setting some stuff ow.show() a.exec_() ow.saveSettings()
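# A simplified, dependency-free sketch of the idea behind parseRules() and
# joinCont() above: two one-sided conditions on the same continuous attribute,
# collected along a path from the root, are merged into a single interval
# description. The helper name and condition format are illustrative only.
def merge_continuous_conditions(cond1, cond2):
    """Merge conditions such as '>2.5' and '<=6.0' into 'in (2.5,6.0]'."""
    low, high = (float("-inf"), "("), (float("inf"), ")")
    for cond in (cond1, cond2):
        value = float(cond.strip("<>= "))
        if cond.startswith(">"):
            low = (value, "[" if "=" in cond else "(")
        else:
            high = (value, "]" if "=" in cond else ")")
    return "in %s%s,%s%s" % (low[1], low[0], high[0], high[1])

# merge_continuous_conditions(">2.5", "<=6.0") == 'in (2.5,6.0]'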
gpl-3.0
thientu/scikit-learn
sklearn/feature_extraction/text.py
110
50157
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # Robert Layton <robertlayton@gmail.com> # Jochen Wersdörfer <jochen@wersdoerfer.de> # Roman Sinayev <roman.sinayev@gmail.com> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ from __future__ import unicode_literals import array from collections import Mapping, defaultdict import numbers from operator import itemgetter import re import unicodedata import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six.moves import xrange from ..preprocessing import normalize from .hashing import FeatureHasher from .stop_words import ENGLISH_STOP_WORDS from ..utils import deprecated from ..utils.fixes import frombuffer_empty, bincount from ..utils.validation import check_is_fitted __all__ = ['CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. See also -------- strip_accents_ascii Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. """ return ''.join([c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. See also -------- strip_accents_unicode Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None else: # assume it's a collection return frozenset(stop) class VectorizerMixin(object): """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. 
""" if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or " "unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens tokens = [] n_original_tokens = len(original_tokens) for n in xrange(min_n, min(max_n + 1, n_original_tokens + 1)): for i in xrange(n_original_tokens - n + 1): tokens.append(" ".join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) ngrams = [] min_n, max_n = self.ngram_range for n in xrange(min_n, min(max_n + 1, text_len + 1)): for i in xrange(text_len - n + 1): ngrams.append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams excluding any whitespace (operating only inside word boundaries)""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in xrange(min_n, max_n + 1): offset = 0 ngrams.append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams.append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" if self.preprocessor is not None: return self.preprocessor # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the cost of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) if self.lowercase: return lambda x: strip_accents(x.lower()) else: return strip_accents def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens""" if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) return lambda doc: token_pattern.findall(doc) def get_stop_words(self): """Build or fetch the effective stop words list""" return _check_stop_list(self.stop_words) def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if self.analyzer == 'char': return lambda doc: self._char_ngrams(preprocess(self.decode(doc))) elif self.analyzer == 'char_wb': return lambda doc: self._char_wb_ngrams( preprocess(self.decode(doc))) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return lambda doc: self._word_ngrams( tokenize(preprocess(self.decode(doc))), stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _validate_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(six.itervalues(vocabulary)) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in xrange(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False def _check_vocabulary(self): """Check if vocabulary is empty or missing (not fit-ed)""" msg = "%(name)s - Vocabulary wasn't fitted." check_is_fitted(self, 'vocabulary_', msg=msg), if len(self.vocabulary_) == 0: raise ValueError("Vocabulary is empty") @property @deprecated("The `fixed_vocabulary` attribute is deprecated and will be " "removed in 0.18. Please use `fixed_vocabulary_` instead.") def fixed_vocabulary(self): return self.fixed_vocabulary_ class HashingVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. 
This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. lowercase : boolean, default=True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. 
The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). n_features : integer, default=(2 ** 20) The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. binary: boolean, default=False. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype: type, optional Type of the matrix returned by fit_transform() or transform(). non_negative : boolean, default=False Whether output matrices should contain non-negative values only; effectively calls abs on the matrix prior to returning it. When True, output values can be interpreted as frequencies. When False, output values will have expected value zero. See also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', non_negative=False, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.non_negative = non_negative self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless.""" # triggers a parameter validation self._get_hasher().fit(X, y=y) return self def transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : (ignored) Returns ------- X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix. """ analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X # Alias transform to fit_transform for convenience fit_transform = transform def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, non_negative=self.non_negative) def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return bincount(X.indices, minlength=X.shape[1]) else: return np.diff(sp.csc_matrix(X, copy=False).indptr) class CountVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.coo_matrix. 
If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. Only applies if ``analyzer == 'word'``. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, True by default Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. 
min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : boolean, default=False If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- HashingVectorizer, TfidfVectorizer Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df of min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for new_val, (term, old_val) in enumerate(sorted_features): map_index[new_val] = old_val vocabulary[term] = new_val return X[:, map_index] def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. 
This does not prune samples with zero features. """ if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = _make_int_array() indptr = _make_int_array() indptr.append(0) for doc in raw_documents: for feature in analyze(doc): try: j_indices.append(vocabulary[feature]) except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") j_indices = frombuffer_empty(j_indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.ones(len(j_indices)) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sum_duplicates() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array, [n_samples, n_features] Document-term matrix. """ # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. 
self._validate_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix, [n_samples, n_features] Document-term matrix. """ if not hasattr(self, 'vocabulary_'): self._validate_vocabulary() self._check_vocabulary() # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array, sparse matrix}, shape = [n_samples, n_features] Returns ------- X_inv : list of arrays, len = n_samples List of arrays of terms. """ self._check_vocabulary() if sp.issparse(X): # We need CSR format for fast row manipulations. X = X.tocsr() else: # We need to convert X to a matrix, so that the indexing # returns 2D objects X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name""" self._check_vocabulary() return [t for t, i in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))] def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(BaseEstimator, TransformerMixin): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf, instead of tf * idf. The effect of this is that terms with zero idf, i.e. that occur in all documents of a training set, will not be entirely ignored. The formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR, as follows: Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True. Idf is "t" when use_idf is given, "n" (none) otherwise. 
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). References ---------- .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74.` .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120.` """ def __init__(self, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights) Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts """ if not sp.issparse(X): X = sp.csc_matrix(X) if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log+1 instead of log makes sure terms with zero idf don't get # suppressed entirely. idf = np.log(float(n_samples) / df) + 1.0 self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features) return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- vectors : sparse matrix, [n_samples, n_features] """ if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float): # preserve float family dtype X = sp.csr_matrix(X, copy=copy) else: # convert counts or binary occurrences to floats X = sp.csr_matrix(X, dtype=np.float64, copy=copy) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: check_is_fitted(self, '_idf_diag', 'idf vector is not fitted') expected_n_features = self._idf_diag.shape[0] if n_features != expected_n_features: raise ValueError("Input has n_features=%d while the model" " has been trained with n_features=%d" % ( n_features, expected_n_features)) # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): if hasattr(self, "_idf_diag"): return np.ravel(self._idf_diag.sum(axis=0)) else: return None class TfidfVectorizer(CountVectorizer): """Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. 
Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char'} or callable Whether the feature should be made of word or character n-grams. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, default True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. 
If not given, a vocabulary is determined from the input documents. binary : boolean, default=False If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs.) dtype : type, optional Type of the matrix returned by fit_transform() or transform(). norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- idf_ : array, shape = [n_features], or None The learned idf vector (global term weights) when ``use_idf`` is set to True, None otherwise. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- CountVectorizer Tokenize the documents and count the occurrences of token and return them as a sparse matrix TfidfTransformer Apply Term Frequency Inverse Document Frequency normalization to a sparse matrix of occurrence counts. Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super(TfidfVectorizer, self).__init__( input=input, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. 
Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- self : TfidfVectorizer """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents, copy=True): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted') X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
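# --- Usage sketch (editor's illustration, not part of the original module) ---
# A minimal example of how the vectorizer defined above is typically driven:
# fit on a small corpus, then transform unseen documents into the same tf-idf
# feature space. The corpus and parameter values are made up for illustration.
if __name__ == "__main__":
    corpus = [
        "the cat sat on the mat",
        "the dog sat on the log",
        "cats and dogs make good pets",
    ]
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words="english")
    X_train = vectorizer.fit_transform(corpus)        # sparse (n_docs, n_features)
    X_new = vectorizer.transform(["the cat chased the dog"])
    print(sorted(vectorizer.vocabulary_))              # learned unigrams and bigrams
    print(X_train.shape)
    print(X_new.shape)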
bsd-3-clause
thientu/scikit-learn
sklearn/linear_model/tests/test_sgd.py
68
43439
import pickle import unittest import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import raises from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises_regexp from sklearn import linear_model, datasets, metrics from sklearn.base import clone from sklearn.linear_model import SGDClassifier, SGDRegressor from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler class SparseSGDClassifier(SGDClassifier): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).fit(X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw) def decision_function(self, X): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).decision_function(X) def predict_proba(self, X): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).predict_proba(X) class SparseSGDRegressor(SGDRegressor): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.fit(self, X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.partial_fit(self, X, y, *args, **kw) def decision_function(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.decision_function(self, X, *args, **kw) # Test Data # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2; string class labels X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5], [1, 1], [0.75, 0.5], [1.5, 1.5], [-1, -1], [0, -0.5], [1, -1]]) Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3 T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]]) true_result2 = ["one", "two", "three"] # test sample 3 X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]]) Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) # test sample 4 - two more or less redundent feature groups X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0], [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0], [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1], [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]]) Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) iris = datasets.load_iris() # test sample 5 - test sample 1 as binary classification problem X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y5 = [1, 1, 1, 2, 2, 2] true_result5 = [0, 1, 1] # Classification Test Case class CommonTest(object): def factory(self, **kwargs): if "random_state" not in kwargs: kwargs["random_state"] = 42 return self.factory_class(**kwargs) # a simple implementation of ASGD to use for testing # uses squared loss to find the gradient def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0): if weight_init is None: weights = np.zeros(X.shape[1]) else: weights = weight_init average_weights = np.zeros(X.shape[1]) intercept = intercept_init average_intercept = 0.0 decay = 1.0 # sparse data has a fixed decay of .01 if (isinstance(self, SparseSGDClassifierTestCase) or 
isinstance(self, SparseSGDRegressorTestCase)): decay = .01 for i, entry in enumerate(X): p = np.dot(entry, weights) p += intercept gradient = p - y[i] weights *= 1.0 - (eta * alpha) weights += -(eta * gradient * entry) intercept += -(eta * gradient) * decay average_weights *= i average_weights += weights average_weights /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 return average_weights, average_intercept def _test_warm_start(self, X, Y, lr): # Test that explicit warm restart... clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf.fit(X, Y) clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy()) # ... and implicit warm restart are equivalent. clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, warm_start=True, learning_rate=lr) clf3.fit(X, Y) assert_equal(clf3.t_, clf.t_) assert_array_almost_equal(clf3.coef_, clf.coef_) clf3.set_params(alpha=0.001) clf3.fit(X, Y) assert_equal(clf3.t_, clf2.t_) assert_array_almost_equal(clf3.coef_, clf2.coef_) def test_warm_start_constant(self): self._test_warm_start(X, Y, "constant") def test_warm_start_invscaling(self): self._test_warm_start(X, Y, "invscaling") def test_warm_start_optimal(self): self._test_warm_start(X, Y, "optimal") def test_input_format(self): # Input format tests. clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) clf.fit(X, Y) Y_ = np.array(Y)[:, np.newaxis] Y_ = np.c_[Y_, Y_] assert_raises(ValueError, clf.fit, X, Y_) def test_clone(self): # Test whether clone works ok. clf = self.factory(alpha=0.01, n_iter=5, penalty='l1') clf = clone(clf) clf.set_params(penalty='l2') clf.fit(X, Y) clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2') clf2.fit(X, Y) assert_array_equal(clf.coef_, clf2.coef_) def test_plain_has_no_average_attr(self): clf = self.factory(average=True, eta0=.01) clf.fit(X, Y) assert_true(hasattr(clf, 'average_coef_')) assert_true(hasattr(clf, 'average_intercept_')) assert_true(hasattr(clf, 'standard_intercept_')) assert_true(hasattr(clf, 'standard_coef_')) clf = self.factory() clf.fit(X, Y) assert_false(hasattr(clf, 'average_coef_')) assert_false(hasattr(clf, 'average_intercept_')) assert_false(hasattr(clf, 'standard_intercept_')) assert_false(hasattr(clf, 'standard_coef_')) def test_late_onset_averaging_not_reached(self): clf1 = self.factory(average=600) clf2 = self.factory() for _ in range(100): if isinstance(clf1, SGDClassifier): clf1.partial_fit(X, Y, classes=np.unique(Y)) clf2.partial_fit(X, Y, classes=np.unique(Y)) else: clf1.partial_fit(X, Y) clf2.partial_fit(X, Y) assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16) assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16) def test_late_onset_averaging_reached(self): eta0 = .001 alpha = .0001 Y_encode = np.array(Y) Y_encode[Y_encode == 1] = -1.0 Y_encode[Y_encode == 2] = 1.0 clf1 = self.factory(average=7, learning_rate="constant", loss='squared_loss', eta0=eta0, alpha=alpha, n_iter=2, shuffle=False) clf2 = self.factory(average=0, learning_rate="constant", loss='squared_loss', eta0=eta0, alpha=alpha, n_iter=1, shuffle=False) clf1.fit(X, Y_encode) clf2.fit(X, Y_encode) average_weights, average_intercept = \ self.asgd(X, Y_encode, eta0, alpha, weight_init=clf2.coef_.ravel(), intercept_init=clf2.intercept_) assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16) assert_almost_equal(clf1.intercept_, 
average_intercept, decimal=16) class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest): """Test suite for the dense representation variant of SGD""" factory_class = SGDClassifier def test_sgd(self): # Check that SGD gives any results :-) for loss in ("hinge", "squared_hinge", "log", "modified_huber"): clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True, loss=loss, n_iter=10, shuffle=True) clf.fit(X, Y) # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7) assert_array_equal(clf.predict(T), true_result) @raises(ValueError) def test_sgd_bad_l1_ratio(self): # Check whether expected ValueError on bad l1_ratio self.factory(l1_ratio=1.1) @raises(ValueError) def test_sgd_bad_learning_rate_schedule(self): # Check whether expected ValueError on bad learning_rate self.factory(learning_rate="<unknown>") @raises(ValueError) def test_sgd_bad_eta0(self): # Check whether expected ValueError on bad eta0 self.factory(eta0=0, learning_rate="constant") @raises(ValueError) def test_sgd_bad_alpha(self): # Check whether expected ValueError on bad alpha self.factory(alpha=-.1) @raises(ValueError) def test_sgd_bad_penalty(self): # Check whether expected ValueError on bad penalty self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): # Check whether expected ValueError on bad loss self.factory(loss="foobar") @raises(ValueError) def test_sgd_n_iter_param(self): # Test parameter validity check self.factory(n_iter=-10000) @raises(ValueError) def test_sgd_shuffle_param(self): # Test parameter validity check self.factory(shuffle="false") @raises(TypeError) def test_argument_coef(self): # Checks coef_init not allowed as model argument (only fit) # Provided coef_ does not match dataset. self.factory(coef_init=np.zeros((3,))).fit(X, Y) @raises(ValueError) def test_provide_coef(self): # Checks coef_init shape for the warm starts # Provided coef_ does not match dataset. self.factory().fit(X, Y, coef_init=np.zeros((3,))) @raises(ValueError) def test_set_intercept(self): # Checks intercept_ shape for the warm starts # Provided intercept_ does not match dataset. self.factory().fit(X, Y, intercept_init=np.zeros((3,))) def test_set_intercept_binary(self): # Checks intercept_ shape for the warm starts in binary case self.factory().fit(X5, Y5, intercept_init=0) def test_average_binary_computed_correctly(self): # Checks the SGDClassifier correctly computes the average weights eta = .1 alpha = 2. n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) # simple linear function without noise y = np.dot(X, w) y = np.sign(y) clf.fit(X, y) average_weights, average_intercept = self.asgd(X, y, eta, alpha) average_weights = average_weights.reshape(1, -1) assert_array_almost_equal(clf.coef_, average_weights, decimal=14) assert_almost_equal(clf.intercept_, average_intercept, decimal=14) def test_set_intercept_to_intercept(self): # Checks intercept_ shape consistency for the warm starts # Inconsistent intercept_ shape. 
clf = self.factory().fit(X5, Y5) self.factory().fit(X5, Y5, intercept_init=clf.intercept_) clf = self.factory().fit(X, Y) self.factory().fit(X, Y, intercept_init=clf.intercept_) @raises(ValueError) def test_sgd_at_least_two_labels(self): # Target must have at least two labels self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9)) def test_partial_fit_weight_class_balanced(self): # partial_fit with class_weight='balanced' not supported""" assert_raises_regexp(ValueError, "class_weight 'balanced' is not supported for " "partial_fit. In order to use 'balanced' weights, " "use compute_class_weight\('balanced', classes, y\). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. " "Pass the resulting weights as the class_weight " "parameter.", self.factory(class_weight='balanced').partial_fit, X, Y, classes=np.unique(Y)) def test_sgd_multiclass(self): # Multi-class test case clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_average(self): eta = .001 alpha = .01 # Multi-class average test case clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) np_Y2 = np.array(Y2) clf.fit(X2, np_Y2) classes = np.unique(np_Y2) for i, cl in enumerate(classes): y_i = np.ones(np_Y2.shape[0]) y_i[np_Y2 != cl] = -1 average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha) assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16) assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16) def test_sgd_multiclass_with_init_coef(self): # Multi-class test case clf = self.factory(alpha=0.01, n_iter=20) clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3)) assert_equal(clf.coef_.shape, (3, 2)) assert_true(clf.intercept_.shape, (3,)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_njobs(self): # Multi-class test case with multi-core support clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_set_coef_multiclass(self): # Checks coef_init and intercept_init shape for for multi-class # problems # Provided coef_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2))) # Provided coef_ does match dataset clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2))) # Provided intercept_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, intercept_init=np.zeros((1,))) # Provided intercept_ does match dataset. clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,))) def test_sgd_proba(self): # Check SGD.predict_proba # Hinge loss does not allow for conditional prob estimate. # We cannot use the factory here, because it defines predict_proba # anyway. 
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y) assert_false(hasattr(clf, "predict_proba")) assert_false(hasattr(clf, "predict_log_proba")) # log and modified_huber losses can output probability estimates # binary case for loss in ["log", "modified_huber"]: clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10) clf.fit(X, Y) p = clf.predict_proba([[3, 2]]) assert_true(p[0, 1] > 0.5) p = clf.predict_proba([[-1, -1]]) assert_true(p[0, 1] < 0.5) p = clf.predict_log_proba([[3, 2]]) assert_true(p[0, 1] > p[0, 0]) p = clf.predict_log_proba([[-1, -1]]) assert_true(p[0, 1] < p[0, 0]) # log loss multiclass probability estimates clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2) d = clf.decision_function([[.1, -.1], [.3, .2]]) p = clf.predict_proba([[.1, -.1], [.3, .2]]) assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1)) assert_almost_equal(p[0].sum(), 1) assert_true(np.all(p[0] >= 0)) p = clf.predict_proba([[-1, -1]]) d = clf.decision_function([[-1, -1]]) assert_array_equal(np.argsort(p[0]), np.argsort(d[0])) l = clf.predict_log_proba([[3, 2]]) p = clf.predict_proba([[3, 2]]) assert_array_almost_equal(np.log(p), l) l = clf.predict_log_proba([[-1, -1]]) p = clf.predict_proba([[-1, -1]]) assert_array_almost_equal(np.log(p), l) # Modified Huber multiclass probability estimates; requires a separate # test because the hard zero/one probabilities may destroy the # ordering present in decision_function output. clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10) clf.fit(X2, Y2) d = clf.decision_function([[3, 2]]) p = clf.predict_proba([[3, 2]]) if not isinstance(self, SparseSGDClassifierTestCase): assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1)) else: # XXX the sparse test gets a different X2 (?) assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1)) # the following sample produces decision_function values < -1, # which would cause naive normalization to fail (see comment # in SGDClassifier.predict_proba) x = X.mean(axis=0) d = clf.decision_function([x]) if np.all(d < -1): # XXX not true in sparse test case (why?) p = clf.predict_proba([x]) assert_array_almost_equal(p[0], [1 / 3.] * 3) def test_sgd_l1(self): # Test L1 regularization n = len(X4) rng = np.random.RandomState(13) idx = np.arange(n) rng.shuffle(idx) X = X4[idx, :] Y = Y4[idx] clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False, n_iter=2000, shuffle=False) clf.fit(X, Y) assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,))) pred = clf.predict(X) assert_array_equal(pred, Y) # test sparsify with dense inputs clf.sparsify() assert_true(sp.issparse(clf.coef_)) pred = clf.predict(X) assert_array_equal(pred, Y) # pickle and unpickle with sparse coef_ clf = pickle.loads(pickle.dumps(clf)) assert_true(sp.issparse(clf.coef_)) pred = clf.predict(X) assert_array_equal(pred, Y) def test_class_weights(self): # Test class weights. X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight=None) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight={1: 0.001}) clf.fit(X, y) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) def test_equal_class_weight(self): # Test if equal class weights approx. equals no class weights. 
X = [[1, 0], [1, 0], [0, 1], [0, 1]] y = [0, 0, 1, 1] clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None) clf.fit(X, y) X = [[1, 0], [0, 1]] y = [0, 1] clf_weighted = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5, 1: 0.5}) clf_weighted.fit(X, y) # should be similar up to some epsilon due to learning rate schedule assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) @raises(ValueError) def test_wrong_class_weight_label(self): # ValueError due to not existing class label. clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5}) clf.fit(X, Y) @raises(ValueError) def test_wrong_class_weight_format(self): # ValueError due to wrong class_weight argument type. clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5]) clf.fit(X, Y) def test_weights_multiplied(self): # Tests that class_weight and sample_weight are multiplicative class_weights = {1: .6, 2: .3} sample_weights = np.random.random(Y4.shape[0]) multiplied_together = np.copy(sample_weights) multiplied_together[Y4 == 1] *= class_weights[1] multiplied_together[Y4 == 2] *= class_weights[2] clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights) clf2 = self.factory(alpha=0.1, n_iter=20) clf1.fit(X4, Y4, sample_weight=sample_weights) clf2.fit(X4, Y4, sample_weight=multiplied_together) assert_almost_equal(clf1.coef_, clf2.coef_) def test_balanced_weight(self): # Test class weights for imbalanced data""" # compute reference metrics on iris dataset that is quite balanced by # default X, y = iris.data, iris.target X = scale(X) idx = np.arange(X.shape[0]) rng = np.random.RandomState(6) rng.shuffle(idx) X = X[idx] y = y[idx] clf = self.factory(alpha=0.0001, n_iter=1000, class_weight=None, shuffle=False).fit(X, y) assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96, decimal=1) # make the same prediction using balanced class_weight clf_balanced = self.factory(alpha=0.0001, n_iter=1000, class_weight="balanced", shuffle=False).fit(X, y) assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96, decimal=1) # Make sure that in the balanced case it does not change anything # to use "balanced" assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6) # build an very very imbalanced dataset out of iris data X_0 = X[y == 0, :] y_0 = y[y == 0] X_imbalanced = np.vstack([X] + [X_0] * 10) y_imbalanced = np.concatenate([y] + [y_0] * 10) # fit a model on the imbalanced data without class weight info clf = self.factory(n_iter=1000, class_weight=None, shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96) # fit a model with balanced class_weight enabled clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96) # fit another using a fit parameter override clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96) def test_sample_weights(self): # Test weights on individual samples X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf.fit(X, y, 
sample_weight=[0.001] * 3 + [1] * 2) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) @raises(ValueError) def test_wrong_sample_weights(self): # Test if ValueError is raised if sample_weight has wrong shape clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) # provided sample_weight too long clf.fit(X, Y, sample_weight=np.arange(7)) @raises(ValueError) def test_partial_fit_exception(self): clf = self.factory(alpha=0.01) # classes was not specified clf.partial_fit(X3, Y3) def test_partial_fit_binary(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y) clf.partial_fit(X[:third], Y[:third], classes=classes) assert_equal(clf.coef_.shape, (1, X.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([[0, 0]]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) y_pred = clf.predict(T) assert_array_equal(y_pred, true_result) def test_partial_fit_multiclass(self): third = X2.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y2) clf.partial_fit(X2[:third], Y2[:third], classes=classes) assert_equal(clf.coef_.shape, (3, X2.shape[1])) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3)) id1 = id(clf.coef_.data) clf.partial_fit(X2[third:], Y2[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def test_fit_then_partial_fit(self): # Partial_fit should work after initial fit in the multiclass case. # Non-regression test for #2496; fit would previously produce a # Fortran-ordered coef_ that subsequent partial_fit couldn't handle. clf = self.factory() clf.fit(X2, Y2) clf.partial_fit(X2, Y2) # no exception here def _test_partial_fit_equal_fit(self, lr): for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)): clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2, learning_rate=lr, shuffle=False) clf.fit(X_, Y_) y_pred = clf.decision_function(T_) t = clf.t_ classes = np.unique(Y_) clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X_, Y_, classes=classes) y_pred2 = clf.decision_function(T_) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): self._test_partial_fit_equal_fit("invscaling") def test_regression_losses(self): clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="squared_epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, loss="huber") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01, loss="squared_loss") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) def test_warm_start_multiclass(self): self._test_warm_start(X2, Y2, "optimal") def test_multiple_fit(self): # Test multiple calls of fit w/ different shaped inputs. 
clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) clf.fit(X, Y) assert_true(hasattr(clf, "coef_")) # Non-regression test: try fitting with a different label set. y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)] clf.fit(X[:, :-1], y) class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase): """Run exactly the same tests using the sparse representation variant""" factory_class = SparseSGDClassifier ############################################################################### # Regression Test Case class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest): """Test suite for the dense representation variant of SGD""" factory_class = SGDRegressor def test_sgd(self): # Check that SGD gives any results. clf = self.factory(alpha=0.1, n_iter=2, fit_intercept=False) clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2]) assert_equal(clf.coef_[0], clf.coef_[1]) @raises(ValueError) def test_sgd_bad_penalty(self): # Check whether expected ValueError on bad penalty self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): # Check whether expected ValueError on bad loss self.factory(loss="foobar") def test_sgd_averaged_computed_correctly(self): # Tests the average regressor matches the naive implementation eta = .001 alpha = .01 n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) # simple linear function without noise y = np.dot(X, w) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) clf.fit(X, y) average_weights, average_intercept = self.asgd(X, y, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16) def test_sgd_averaged_partial_fit(self): # Tests whether the partial fit yields the same average as the fit eta = .001 alpha = .01 n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) # simple linear function without noise y = np.dot(X, w) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)]) clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):]) average_weights, average_intercept = self.asgd(X, y, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16) def test_average_sparse(self): # Checks the average weights on data with 0s eta = .001 alpha = .01 clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) n_samples = Y3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):]) average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16) def test_sgd_least_squares_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, 
fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_sgd_epsilon_insensitive(self): xmin, xmax = -5, 5 n_samples = 100 X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.99) # simple linear function with noise y = 0.5 * X.ravel() \ + np.random.randn(n_samples, 1).ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.5) def test_sgd_huber_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_elasticnet_convergence(self): # Check that the SGD output is consistent with coordinate descent n_samples, n_features = 1000, 5 rng = np.random.RandomState(0) X = np.random.randn(n_samples, n_features) # ground_truth linear model that generate y from X and to which the # models should converge if the regularizer would be set to 0.0 ground_truth_coef = rng.randn(n_features) y = np.dot(X, ground_truth_coef) # XXX: alpha = 0.1 seems to cause convergence problems for alpha in [0.01, 0.001]: for l1_ratio in [0.5, 0.8, 1.0]: cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) cd.fit(X, y) sgd = self.factory(penalty='elasticnet', n_iter=50, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) sgd.fit(X, y) err_msg = ("cd and sgd did not converge to comparable " "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)) assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg) def test_partial_fit(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) clf.partial_fit(X[:third], Y[:third]) assert_equal(clf.coef_.shape, (X.shape[1], )) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([[0, 0]]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def _test_partial_fit_equal_fit(self, lr): clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01, learning_rate=lr, shuffle=False) clf.fit(X, Y) y_pred = clf.predict(T) t = clf.t_ clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X, Y) y_pred2 = clf.predict(T) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): 
self._test_partial_fit_equal_fit("invscaling") def test_loss_function_epsilon(self): clf = self.factory(epsilon=0.9) clf.set_params(epsilon=0.1) assert clf.loss_functions['huber'][1] == 0.1 class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase): # Run exactly the same tests using the sparse representation variant factory_class = SparseSGDRegressor def test_l1_ratio(): # Test if l1 ratio extremes match L1 and L2 penalty settings. X, y = datasets.make_classification(n_samples=1000, n_features=100, n_informative=20, random_state=1234) # test if elasticnet with l1_ratio near 1 gives same result as pure l1 est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.9999999999, random_state=42).fit(X, y) est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y) assert_array_almost_equal(est_en.coef_, est_l1.coef_) # test if elasticnet with l1_ratio near 0 gives same result as pure l2 est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.0000000001, random_state=42).fit(X, y) est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y) assert_array_almost_equal(est_en.coef_, est_l2.coef_) def test_underflow_or_overlow(): with np.errstate(all='raise'): # Generate some weird data with hugely unscaled features rng = np.random.RandomState(0) n_samples = 100 n_features = 10 X = rng.normal(size=(n_samples, n_features)) X[:, :2] *= 1e300 assert_true(np.isfinite(X).all()) # Use MinMaxScaler to scale the data without introducing a numerical # instability (computing the standard deviation naively is not possible # on this data) X_scaled = MinMaxScaler().fit_transform(X) assert_true(np.isfinite(X_scaled).all()) # Define a ground truth on the scaled data ground_truth = rng.normal(size=n_features) y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32) assert_array_equal(np.unique(y), [0, 1]) model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500) # smoke test: model is stable on scaled data model.fit(X_scaled, y) assert_true(np.isfinite(model.coef_).all()) # model is numerically unstable on unscaled data msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*" " Scaling input data with StandardScaler or MinMaxScaler" " might help.") assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y) def test_numerical_stability_large_gradient(): # Non regression test case for numerical stability on scaled problems # where the gradient can still explode with some losses model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True, penalty='elasticnet', l1_ratio=0.3, alpha=0.01, eta0=0.001, random_state=0) with np.errstate(all='raise'): model.fit(iris.data, iris.target) assert_true(np.isfinite(model.coef_).all()) def test_large_regularization(): # Non regression tests for numerical stability issues caused by large # regularization parameters for penalty in ['l2', 'l1', 'elasticnet']: model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1, n_iter=5, penalty=penalty, shuffle=False) with np.errstate(all='raise'): model.fit(iris.data, iris.target) assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
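# --- Editor's illustration (not part of the original test module) ---
# The asgd() helper above keeps a running average of the SGD iterates:
#     avg_t = ((t - 1) * avg_{t-1} + w_t) / t
# which is exactly what its in-place updates
# (average_weights *= i; average_weights += weights; average_weights /= i + 1)
# compute. The toy function below shows the same recurrence in isolation on a
# made-up stream of weight vectors and checks it against the plain mean.
import numpy as np


def running_average(weight_stream):
    """Average a stream of weight vectors online, one update per iterate."""
    avg = None
    for t, w in enumerate(weight_stream, start=1):
        if avg is None:
            avg = np.zeros_like(w, dtype=float)
        avg *= (t - 1)   # undo the previous normalization
        avg += w         # accumulate the newest iterate
        avg /= t         # renormalize
    return avg


if __name__ == "__main__":
    rng = np.random.RandomState(0)
    stream = [rng.normal(size=3) for _ in range(10)]
    assert np.allclose(running_average(stream), np.mean(stream, axis=0))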
bsd-3-clause
woobe/h2o
py/testdir_multi_jvm/test_import_covtype_parse_3jvm_fvec.py
1
1632
import unittest, sys, random, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_import as h2i, h2o_hosts

class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        pass
        print "Will build clouds with incrementing heap sizes and import folder/parse"

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_import_covtype_parse_3jvm_fvec(self):
        h2o.beta_features = True
        csvFilename = "covtype.data"
        importFolderPath = "standard"
        trialMax = 2
        for tryHeap in [4,3,2,1]:
            print "\n", tryHeap,"GB heap, 3 jvms, import folder, then loop parsing 'covtype.data' to unique keys"
            localhost = h2o.decide_if_localhost()
            if (localhost):
                h2o.build_cloud(node_count=3, java_heap_GB=tryHeap)
            else:
                h2o_hosts.build_cloud_with_hosts(node_count=3, java_heap_GB=tryHeap)

            for trial in range(trialMax):
                # import each time, because h2o deletes source file after parse
                csvPathname = importFolderPath + "/" + csvFilename
                hex_key = csvFilename + "_" + str(trial) + ".hex"
                parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname,
                                               hex_key=hex_key, timeoutSecs=20)

            # sticky ports?
            h2o.tear_down_cloud()
            time.sleep(5)
            print "Waiting 60 secs for TIME_WAIT sockets to go away"
            time.sleep(60)

if __name__ == '__main__':
    h2o.unit_main()
apache-2.0
ningchi/scikit-learn
sklearn/kernel_approximation.py
18
17705
""" The :mod:`sklearn.kernel_approximation` module implements several approximate kernel feature maps base on Fourier transforms. """ # Author: Andreas Mueller <amueller@ais.uni-bonn.de> # # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import svd from .base import BaseEstimator from .base import TransformerMixin from .utils import check_array, check_random_state, as_float_array from .utils.extmath import safe_sparse_dot from .utils.validation import check_is_fitted from .metrics.pairwise import pairwise_kernels class RBFSampler(BaseEstimator, TransformerMixin): """Approximates feature map of an RBF kernel by Monte Carlo approximation of its Fourier transform. It implements a variant of Random Kitchen Sinks.[1] Parameters ---------- gamma : float Parameter of RBF kernel: exp(-gamma * x^2) n_components : int Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. Notes ----- See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and Benjamin Recht. [1] "Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning" by A. Rahimi and Benjamin Recht. (http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) """ def __init__(self, gamma=1., n_components=100, random_state=None): self.gamma = gamma self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X, accept_sparse='csr') random_state = check_random_state(self.random_state) n_features = X.shape[1] self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal( size=(n_features, self.n_components))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X, y=None): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = check_array(X, accept_sparse='csr') projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class SkewedChi2Sampler(BaseEstimator, TransformerMixin): """Approximates feature map of the "skewed chi-squared" kernel by Monte Carlo approximation of its Fourier transform. Parameters ---------- skewedness : float "skewedness" parameter of the kernel. Needs to be cross-validated. n_components : int number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. 
References ---------- See "Random Fourier Approximations for Skewed Multiplicative Histogram Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. See also -------- AdditiveChi2Sampler : A different approach for approximating an additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. """ def __init__(self, skewedness=1., n_components=100, random_state=None): self.skewedness = skewedness self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the transformer. """ X = check_array(X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = (1. / np.pi * np.log(np.tan(np.pi / 2. * uniform))) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) return self def transform(self, X, y=None): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, 'random_weights_') X = as_float_array(X, copy=True) X = check_array(X, copy=False) if (X < 0).any(): raise ValueError("X may not contain entries smaller than zero.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.) / np.sqrt(self.n_components) return projection class AdditiveChi2Sampler(BaseEstimator, TransformerMixin): """Approximate feature map for additive chi2 kernel. Uses sampling the fourier transform of the kernel characteristic at regular intervals. Since the kernel that is to be approximated is additive, the components of the input vectors can be treated separately. Each entry in the original space is transformed into 2*sample_steps+1 features, where sample_steps is a parameter of the method. Typical values of sample_steps include 1, 2 and 3. Optimal choices for the sampling interval for certain data ranges can be computed (see the reference). The default values should be reasonable. Parameters ---------- sample_steps : int, optional Gives the number of (complex) sampling points. sample_interval : float, optional Sampling interval. Must be specified when sample_steps not in {1,2,3}. Notes ----- This estimator approximates a slightly different version of the additive chi squared kernel then ``metric.additive_chi2`` computes. See also -------- SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi squared kernel. References ---------- See `"Efficient additive kernels via explicit feature maps" <http://eprints.pascal-network.org/archive/00006964/01/vedaldi10.pdf>`_ Vedaldi, A. 
and Zisserman, A., Computer Vision and Pattern Recognition 2010 """ def __init__(self, sample_steps=2, sample_interval=None): self.sample_steps = sample_steps self.sample_interval = sample_interval def fit(self, X, y=None): """Set parameters.""" X = check_array(X, accept_sparse='csr') if self.sample_interval is None: # See reference, figure 2 c) if self.sample_steps == 1: self.sample_interval_ = 0.8 elif self.sample_steps == 2: self.sample_interval_ = 0.5 elif self.sample_steps == 3: self.sample_interval_ = 0.4 else: raise ValueError("If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval") else: self.sample_interval_ = self.sample_interval return self def transform(self, X, y=None): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Returns ------- X_new : {array, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps + 1)) Whether the return value is an array of sparse matrix depends on the type of the input X. """ msg = ("%(name)s is not fitted. Call fit to set the parameters before" " calling transform") check_is_fitted(self, "sample_interval_", msg=msg) X = check_array(X, accept_sparse='csr') sparse = sp.issparse(X) # check if X has negative values. Doesn't play well with np.log. if ((X.data if sparse else X) < 0).any(): raise ValueError("Entries of X must be non-negative.") # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) def _transform_dense(self, X): non_zero = (X != 0.0) X_nz = X[non_zero] X_step = np.zeros_like(X) X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X_nz) step_nz = 2 * X_nz * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) X_new.append(X_step) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) X_new.append(X_step) return np.hstack(X_new) def _transform_sparse(self, X): indices = X.indices.copy() indptr = X.indptr.copy() data_step = np.sqrt(X.data * self.sample_interval_) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new = [X_step] log_step_nz = self.sample_interval_ * np.log(X.data) step_nz = 2 * X.data * self.sample_interval_ for j in range(1, self.sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * self.sample_interval_)) data_step = factor_nz * np.cos(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) data_step = factor_nz * np.sin(j * log_step_nz) X_step = sp.csr_matrix((data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False) X_new.append(X_step) return sp.hstack(X_new) class Nystroem(BaseEstimator, TransformerMixin): """Approximate a kernel map using a subset of the training data. Constructs an approximate feature map for an arbitrary kernel using a subset of the data as basis. Parameters ---------- kernel : string or callable, default="rbf" Kernel map to be approximated. A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. n_components : int Number of features to construct. How many data points will be used to construct the mapping. 
gamma : float, default=None Gamma parameter for the RBF, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. random_state : {int, RandomState}, optional If int, random_state is the seed used by the random number generator; if RandomState instance, random_state is the random number generator. Attributes ---------- components_ : array, shape (n_components, n_features) Subset of training points used to construct the feature map. component_indices_ : array, shape (n_components) Indices of ``components_`` in the training set. normalization_ : array, shape (n_components, n_components) Normalization matrix needed for embedding. Square root of the kernel matrix on ``components_``. References ---------- * Williams, C.K.I. and Seeger, M. "Using the Nystroem method to speed up kernel machines", Advances in neural information processing systems 2001 * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical Comparison", Advances in Neural Information Processing Systems 2012 See also -------- RBFSampler : An approximation to the RBF kernel using random Fourier features. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. """ def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3, kernel_params=None, n_components=100, random_state=None): self.kernel = kernel self.gamma = gamma self.coef0 = coef0 self.degree = degree self.kernel_params = kernel_params self.n_components = n_components self.random_state = random_state def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Training data. """ X = check_array(X, accept_sparse='csr') rnd = check_random_state(self.random_state) n_samples = X.shape[0] # get basis vectors if self.n_components > n_samples: # XXX should we just bail? n_components = n_samples warnings.warn("n_components > n_samples. This is not possible.\n" "n_components was set to n_samples, which results" " in inefficient evaluation of the full kernel.") else: n_components = self.n_components n_components = min(n_samples, n_components) inds = rnd.permutation(n_samples) basis_inds = inds[:n_components] basis = X[basis_inds] basis_kernel = pairwise_kernels(basis, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # sqrt of kernel matrix on basis vectors U, S, V = svd(basis_kernel) S = np.maximum(S, 1e-12) self.normalization_ = np.dot(U * 1. / np.sqrt(S), V) self.components_ = basis self.component_indices_ = inds return self def transform(self, X): """Apply feature map to X. Computes an approximate feature map using the kernel between some training points and X. Parameters ---------- X : array-like, shape=(n_samples, n_features) Data to transform. Returns ------- X_transformed : array, shape=(n_samples, n_components) Transformed data. 
""" check_is_fitted(self, 'components_') X = check_array(X, accept_sparse='csr') kernel_params = self._get_kernel_params() embedded = pairwise_kernels(X, self.components_, metric=self.kernel, filter_params=True, **kernel_params) return np.dot(embedded, self.normalization_.T) def _get_kernel_params(self): params = self.kernel_params if params is None: params = {} if not callable(self.kernel): params['gamma'] = self.gamma params['degree'] = self.degree params['coef0'] = self.coef0 return params
bsd-3-clause
ningchi/scikit-learn
examples/cluster/plot_color_quantization.py
295
3443
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================

Performs a pixel-wise Vector Quantization (VQ) of an image of the summer
palace (China), reducing the number of colors required to show the image from
96,615 unique colors to 64, while preserving the overall appearance quality.

In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.

For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
#          Olivier Grisel <olivier.grisel@ensta.org>
#          Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause

print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time

n_colors = 64

# Load the Summer Palace photo
china = load_sample_image("china.jpg")

# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255

# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))

print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))

# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))


codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
                                          image_array,
                                          axis=0)
print("done in %0.3fs." % (time() - t0))


def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels"""
    d = codebook.shape[1]
    image = np.zeros((w, h, d))
    label_idx = 0
    for i in range(w):
        for j in range(h):
            image[i][j] = codebook[labels[label_idx]]
            label_idx += 1
    return image


# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)

plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))

plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
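# --- Editor's note (not part of the original example) ---
# recreate_image() above walks the pixels with a nested Python loop for
# clarity. The same codebook lookup can be written as a single NumPy
# fancy-indexing expression, which is much faster on large images and
# produces an identical result.
def recreate_image_vectorized(codebook, labels, w, h):
    """Equivalent to recreate_image, using fancy indexing instead of loops."""
    return codebook[labels].reshape(w, h, -1)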
bsd-3-clause