Dataset columns:

- repo_name: string (lengths 7 to 60)
- path: string (lengths 6 to 134)
- copies: string (lengths 1 to 3)
- size: string (lengths 4 to 6)
- content: string (lengths 1.04k to 149k)
- license: string (12 classes)
repo_name: woodscn/scipy
path: scipy/special/c_misc/struve_convergence.py
copies: 76
size: 3725
content:
""" Convergence regions of the expansions used in ``struve.c`` Note that for v >> z both functions tend rapidly to 0, and for v << -z, they tend to infinity. The floating-point functions over/underflow in the lower left and right corners of the figure. Figure legend ============= Red region Power series is close (1e-12) to the mpmath result Blue region Asymptotic series is close to the mpmath result Green region Bessel series is close to the mpmath result Dotted colored lines Boundaries of the regions Solid colored lines Boundaries estimated by the routine itself. These will be used for determining which of the results to use. Black dashed line The line z = 0.7*|v| + 12 """ from __future__ import absolute_import, division, print_function import numpy as np import matplotlib.pyplot as plt try: import mpmath except: from sympy import mpmath def err_metric(a, b, atol=1e-290): m = abs(a - b) / (atol + abs(b)) m[np.isinf(b) & (a == b)] = 0 return m def do_plot(is_h=True): from scipy.special._ufuncs import \ _struve_power_series, _struve_asymp_large_z, _struve_bessel_series vs = np.linspace(-1000, 1000, 91) zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]]) rp = _struve_power_series(vs[:,None], zs[None,:], is_h) ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h) rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h) mpmath.mp.dps = 50 if is_h: sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z))) else: sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z))) ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:]) err_a = err_metric(ra[0], ex) + 1e-300 err_p = err_metric(rp[0], ex) + 1e-300 err_b = err_metric(rb[0], ex) + 1e-300 err_est_a = abs(ra[1]/ra[0]) err_est_p = abs(rp[1]/rp[0]) err_est_b = abs(rb[1]/rb[0]) z_cutoff = 0.7*abs(vs) + 12 levels = [-1000, -12] plt.cla() plt.hold(1) plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1) plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1) plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1) plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':']) plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':']) plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':']) lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-']) la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-']) lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-']) plt.clabel(lp, fmt={-1000: 'P', -12: 'P'}) plt.clabel(la, fmt={-1000: 'A', -12: 'A'}) plt.clabel(lb, fmt={-1000: 'B', -12: 'B'}) plt.plot(vs, z_cutoff, 'k--') plt.xlim(vs.min(), vs.max()) plt.ylim(zs.min(), zs.max()) plt.xlabel('v') plt.ylabel('z') def main(): plt.clf() plt.subplot(121) do_plot(True) plt.title('Struve H') plt.subplot(122) do_plot(False) plt.title('Struve L') plt.savefig('struve_convergence.png') plt.show() if __name__ == "__main__": import os import sys if '--main' in sys.argv: main() else: import subprocess subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'), '-g', '--python', __file__, '--main'])
license: bsd-3-clause

repo_name: Tuyki/TT_RNN
path: MNISTSeq.py
copies: 1
size: 14227
content:
__author__ = "Yinchong Yang" __copyright__ = "Siemens AG, 2018" __licencse__ = "MIT" __version__ = "0.1" """ MIT License Copyright (c) 2018 Siemens AG Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ We first sample MNIST digits to form sequences of random lengths. The sequence is labeled as one if it contains a zero, and is labeled zero otherwise. This simulates a high dimensional sequence classification task, such as predicting therapy decision and survival of patients based on their historical clinical event information. We train plain LSTM and Tensor-Train LSTM for this task. After the training, we apply Layer-wise Relevance Propagation to identify the digit(s) that have influenced the classification. Apparently, we would expect the LRP algorithm would assign high relevance value to the zero(s) in the sequence. These experiments turn out to be successful, which demonstrates that i) the LSTM and TT-LSTM can indeed learn the mapping from a zero to the sequence class, and that ii) both LSTMs have no problem in storing the zero pattern over a period of time, because the classifier is deployed only at the last hidden state, and that iii) the implementation of the LRP algorithm, complex as it is, is also correct, in that the zeros are assigned high relevance scores. Especially the experiments with the plain LSTM serve as simulation study supporting our submission of “Yinchong Yang, Volker Tresp, Marius Wunderle, Peter A. Fasching, Explaining Therapy Predictions with Layer-wise Relevance Propagation in Neural Networks, at IEEE ICHI 2018”. The original LRP for LSTM from the repository: https://github.com/ArrasL/LRP_for_LSTM which we modified and adjusted for keras models. Feel free to experiment with the hyper parameters and suggest other sequence classification tasks. 
Have fun ;) """ import pickle import sys import numpy as np from numpy import newaxis as na import keras from keras.layers.recurrent import Recurrent from keras import backend as K from keras.engine import InputSpec from keras import activations from keras import initializers from keras import regularizers from keras import constraints from keras.engine.topology import Layer from TTLayer import * from TTRNN import TT_LSTM def make_seq(n, x, y, maxlen=32, seed=123): np.random.seed(seed) lens = np.random.choice(range(2, maxlen), n) seqs = np.zeros((n, maxlen, 28**2)) labels = np.zeros(n) digits_label = np.zeros((n, maxlen), dtype='int32')-1 ids = np.zeros((n, maxlen), dtype='int64')-1 for i in range(n): digits_inds = np.random.choice(range(x.shape[0]), lens[i]) ids[i, -lens[i]::] = digits_inds seqs[i, -lens[i]::, :] = x[digits_inds] digits_label[i, -lens[i]::] = y[digits_inds] class_inds = y[digits_inds] if True: # option 1: is there any 0 in the sequence? labels[i] = (0 in class_inds) else: # option 2: even number of 0 -> label=0, odd number of 0 -> label=1 labels[i] = len(np.where(class_inds == 0)[0]) % 2 == 1 return [seqs, labels, digits_label, ids] # From: https://github.com/ArrasL/LRP_for_LSTM def lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False): """ LRP for a linear layer with input dim D and output dim M. Args: - hin: forward pass input, of shape (D,) - w: connection weights, of shape (D, M) - b: biases, of shape (M,) - hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!) - Rout: relevance at layer output, of shape (M,) - bias_nb_units: number of lower-layer units onto which the bias/stabilizer contribution is redistributed - eps: stabilizer (small positive number) - bias_factor: for global relevance conservation set to 1.0, otherwise 0.0 to ignore bias redistribution Returns: - Rin: relevance at layer input, of shape (D,) """ sign_out = np.where(hout[na, :] >= 0, 1., -1.) # shape (1, M) numer = (w * hin[:, na]) + \ ((bias_factor * b[na, :] * 1. + eps * sign_out * 1.) * 1. / bias_nb_units) # shape (D, M) denom = hout[na, :] + (eps * sign_out * 1.) # shape (1, M) message = (numer / denom) * Rout[na, :] # shape (D, M) Rin = message.sum(axis=1) # shape (D,) # Note: local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D # global network relevance conservation if bias_factor==1.0 (can be used for sanity check) if debug: print("local diff: ", Rout.sum() - Rin.sum()) return Rin def sigmoid(x): x = x.astype('float128') return 1. / (1. 
+ np.exp(-x)) # Modified from https://github.com/ArrasL/LRP_for_LSTM def lstm_lrp(l, d, train_data = True): if train_data: x_l = X_tr[l] y_l = Y_tr[l] z_l = Z_tr[l] # d_l = d_tr[l] else: x_l = X_te[l] y_l = Y_te[l] z_l = Z_te[l] # d_l = d_te[l] # calculate the FF pass in LSTM for every time step pre_gates = np.zeros((MAXLEN, d*4)) gates = np.zeros((MAXLEN, d * 4)) h = np.zeros((MAXLEN, d)) c = np.zeros((MAXLEN, d)) for t in range(MAXLEN): z = np.dot(x_l[t], Ws) if t > 0: z += np.dot(h[t-1], Us) z += b pre_gates[t] = z z0 = z[0:d] z1 = z[d:2*d] z2 = z[2*d:3*d] z3 = z[3 * d::] i = sigmoid(z0) f = sigmoid(z1) c[t] = f * c[t-1] + i * np.tanh(z2) o = sigmoid(z3) h[t] = o * np.tanh(c[t]) gates[t] = np.concatenate([i, f, np.tanh(z2), o]) # check: z_l[12] / h[-1][12] Rh = np.zeros((MAXLEN, d)) Rc = np.zeros((MAXLEN, d)) Rg = np.zeros((MAXLEN, d)) Rx = np.zeros((MAXLEN, 28**2)) bias_factor = 0 Rh[MAXLEN-1] = lrp_linear(hin=z_l, w=Dense_w, b=np.array(Dense_b), hout=np.dot(z_l, Dense_w)+Dense_b, Rout=np.array([y_l]), bias_nb_units=len(z_l), eps=eps, bias_factor=bias_factor) for t in reversed(range(MAXLEN)): # t = MAXLEN-1 # print t Rc[t] += Rh[t] # Rc[t] = Rh[t] if t > 0: Rc[t-1] = lrp_linear(gates[t, d: 2 * d] * c[t - 1], # gates[t , 2 *d: 3 *d ] *c[ t -1], np.identity(d), np.zeros((d)), c[t], Rc[t], 2*d, eps, bias_factor, debug=False) Rg[t] = lrp_linear(gates[t, 0:d] * gates[t, 2*d:3*d], # h_input: i + g np.identity(d), # W np.zeros((d)), # b c[t], # h_output Rc[t], # R_output 2 * d, eps, bias_factor, debug=False) # foo = np.dot(x_l[t], Ws[:,2*d:3*d]) + np.dot(h[t-1], Us[:, 2*d:3*d]) + b[2*d:3*d] Rx[t] = lrp_linear(x_l[t], Ws[:,2*d:3*d], b[2*d:3*d], pre_gates[t, 2*d:3*d], Rg[t], d + 28 ** 2, eps, bias_factor, debug=False) if t > 0: Rh[t-1] = lrp_linear(h[t-1], Us[:,2*d:3*d], b[2*d:3*d], pre_gates[t, 2 * d:3 * d], Rg[t], d + 28**2, eps, bias_factor, debug=False) # hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False # Rx[np.where(d_l==-1.)[0]] *= 0 return Rx from keras.datasets import mnist from keras.utils import to_categorical from keras.models import Model, Input from keras.layers import Dense, GRU, LSTM, Dropout, Masking from keras.optimizers import * from keras.regularizers import l2 from sklearn.metrics import * # Script configurations ################################################################### seed=111111 use_TT = True # whether use Tensor-Train or plain RNNs # Prepare the data ######################################################################## # Load the MNIST data and build sequences: (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(x_train.shape[0], -1) x_test = x_test.reshape(x_test.shape[0], -1) MAXLEN = 32 # max length of the sequences X_tr, Y_tr, d_tr, idx_tr = make_seq(n=10000, x=x_train, y=y_train, maxlen=MAXLEN, seed=seed) X_te, Y_te, d_te, idx_te = make_seq(n=1000, x=x_test, y=y_test, maxlen=MAXLEN, seed=seed+1) # Define the model ###################################################################### if use_TT: # TT settings tt_input_shape = [7, 7, 16] tt_output_shape = [4, 4, 4] tt_ranks = [1, 4, 4, 1] rnn_size = 64 X = Input(shape=X_tr.shape[1::]) X_mask = Masking(mask_value=0.0, input_shape=X_tr.shape[1::])(X) if use_TT: Z = TT_LSTM(tt_input_shape=tt_input_shape, tt_output_shape=tt_output_shape, tt_ranks=tt_ranks, return_sequences=False, recurrent_dropout=.5)(X_mask) Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z) else: Z = LSTM(units=rnn_size, return_sequences=False, 
recurrent_dropout=.5)(X_mask) # dropout=.5, Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-2))(Z) rnn_model = Model(X, Out) rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy', metrics=['accuracy']) # Train the model and save the results ###################################################### rnn_model.fit(X_tr, Y_tr, epochs=50, batch_size=32, validation_split=.2, verbose=2) Y_hat = rnn_model.predict(X_tr, verbose=2).reshape(-1) train_acc = (np.round(Y_hat) == Y_tr).mean() Y_pred = rnn_model.predict(X_te, verbose=2).reshape(-1) (np.round(Y_pred) == Y_te).mean() pred_acc = (np.round(Y_pred) == Y_te).mean() # Collect all hidden layers ################################################################ if use_TT: # Reconstruct the fully connected input-to-hidden weights: from keras.initializers import constant _tt_output_shape = np.copy(tt_output_shape) _tt_output_shape[0] *= 4 fc_w = rnn_model.get_weights()[0] fc_layer = TT_Layer(tt_input_shape=tt_input_shape, tt_output_shape=_tt_output_shape, tt_ranks=tt_ranks, kernel_initializer=constant(value=fc_w), use_bias=False) fc_input = Input(shape=(X_tr.shape[2],)) fc_output = fc_layer(fc_input) fc_model = Model(fc_input, fc_output) fc_model.compile('sgd', 'mse') fc_recon_mat = fc_model.predict(np.identity(X_tr.shape[2])) # Reconstruct the entire LSTM: fc_Z = LSTM(units=np.prod(tt_output_shape), return_sequences=False, dropout=.5, recurrent_dropout=.5, weights=[fc_recon_mat, rnn_model.get_weights()[2], rnn_model.get_weights()[1]])(X_mask) else: fc_Z = LSTM(units=rnn_size, return_sequences=False, dropout=.5, recurrent_dropout=.5, weights=rnn_model.get_weights()[0:3])(X_mask) fc_Out = Dense(units=1, activation='sigmoid', kernel_regularizer=l2(1e-3), weights=rnn_model.get_weights()[3::])(fc_Z) fc_rnn_model = Model(X, fc_Out) fc_rnn_model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy', metrics=['accuracy']) fc_rnn_model.evaluate(X_te, Y_te, verbose=2) # Calculate the LRP: ######################################################################### fc_Z_model = Model(X, fc_Z) fc_Z_model.compile('sgd', 'mse') Y_hat_fc = fc_rnn_model.predict(X_tr) Y_pred_fc = fc_rnn_model.predict(X_te) Ws = fc_rnn_model.get_weights()[0] Us = fc_rnn_model.get_weights()[1] b = fc_rnn_model.get_weights()[2] Dense_w = fc_rnn_model.get_weights()[3] Dense_b = fc_rnn_model.get_weights()[4] Z_tr = fc_Z_model.predict(X_tr) Z_te = fc_Z_model.predict(X_te) eps = 1e-4 is_number_flag = np.where(d_te != -1) # All relevance scores of the test sequences lrp_te = np.vstack([lstm_lrp(i, rnn_size, False).sum(1) for i in range(X_te.shape[0])]) lrp_auroc = roc_auc_score((d_te == 0).astype('int')[is_number_flag].reshape(-1), lrp_te[is_number_flag].reshape(-1)) lrp_auprc = average_precision_score((d_te == 0).astype('int')[is_number_flag].reshape(-1), lrp_te[is_number_flag].reshape(-1)) # The reported results: print pred_acc print lrp_auroc print lrp_auprc
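The key building block of the relevance propagation above is the epsilon-LRP rule for a linear layer. The following minimal sketch repeats that rule on a toy dense layer (the shapes and random inputs are made up) to show that, with bias_factor=1.0 and the bias redistributed over all input units, the total relevance is conserved between output and input:

import numpy as np
from numpy import newaxis as na

def lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor):
    # Same epsilon-LRP redistribution rule as in MNISTSeq.py above.
    sign_out = np.where(hout[na, :] >= 0, 1., -1.)              # shape (1, M)
    numer = w * hin[:, na] + (bias_factor * b[na, :] + eps * sign_out) / bias_nb_units
    denom = hout[na, :] + eps * sign_out                        # shape (1, M)
    return ((numer / denom) * Rout[na, :]).sum(axis=1)          # shape (D,)

rng = np.random.RandomState(0)
hin = rng.randn(4)
w = rng.randn(4, 3)
b = rng.randn(3)
hout = hin.dot(w) + b
Rout = hout.copy()                       # start from the output itself as relevance
Rin = lrp_linear(hin, w, b, hout, Rout, bias_nb_units=4, eps=1e-6, bias_factor=1.0)
print(Rout.sum(), Rin.sum())             # the two totals agree up to eps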
license: mit

repo_name: breznak/NAB
path: nab/labeler.py
copies: 8
size: 16181
content:
# ---------------------------------------------------------------------- # Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import datetime import itertools import numpy import os import pandas try: import simplejson as json except ImportError: import json from nab.util import (absoluteFilePaths, getProbationPeriod, strf, strp, deepmap, createPath, writeJSON) def bucket(rawTimes, buffer): """ Buckets (groups) timestamps that are within the amount of time specified by buffer. """ bucket = [] rawBuckets = [] current = None for t in rawTimes: if current is None: current = t bucket = [current] continue if (t - current) <= buffer: bucket.append(t) else: rawBuckets.append(bucket) current = t bucket = [current] if bucket: rawBuckets.append(bucket) return rawBuckets def merge(rawBuckets, threshold): """ Merges bucketed timestamps into one timestamp (most frequent, or earliest). """ truths = [] passed = [] for bucket in rawBuckets: if len(bucket) >= threshold: truths.append(max(bucket, key=bucket.count)) else: passed.append(bucket) return truths, passed def checkForOverlap(labels, buffer, labelsFileName, dataFileName): """ Raise a ValueError if the difference between any consecutive labels is smaller than the buffer. """ for i in xrange(len(labels)-1): if labels[i+1] - labels[i] <= buffer: # import pdb; pdb.set_trace() raise ValueError("The labels {} and {} in \'{}\' labels for data file " "\'{}\' are too close to each other to be considered distinct " "anomalies. Please relabel." .format(labels[i], labels[i+1], labelsFileName, dataFileName)) class CorpusLabel(object): """ Class to store and manipulate a single set of labels for the whole benchmark corpus. """ def __init__(self, path, corpus): """ Initializes a CorpusLabel object by getting the anomaly windows and labels. When this is done for combining raw user labels, we skip getLabels() because labels are not yet created. @param path (string) Name of file containing the set of labels. @param corpus (nab.Corpus) Corpus object. """ self.path = path self.windows = None self.labels = None self.corpus = corpus self.getWindows() if "raw" not in self.path: # Do not get labels from files in the path nab/labels/raw self.getLabels() def getWindows(self): """ Read JSON label file. Get timestamps as dictionaries with key:value pairs of a relative path and its corresponding list of windows. 
""" def found(t, data): f = data["timestamp"][data["timestamp"] == pandas.tslib.Timestamp(t)] exists = (len(f) == 1) return exists with open(os.path.join(self.path)) as windowFile: windows = json.load(windowFile) self.windows = {} for relativePath in windows.keys(): self.windows[relativePath] = deepmap(strp, windows[relativePath]) if len(self.windows[relativePath]) == 0: continue data = self.corpus.dataFiles[relativePath].data if "raw" in self.path: timestamps = windows[relativePath] else: timestamps = list(itertools.chain.from_iterable(windows[relativePath])) # Check that timestamps are present in dataset if not all([found(t,data) for t in timestamps]): raise ValueError("In the label file %s, one of the timestamps used for " "the datafile %s doesn't match; it does not exist in " "the file. Timestamps in json label files have to " "exactly match timestamps in corresponding datafiles." % (self.path, relativePath)) def validateLabels(self): """ This is run at the end of the label combining process (see scripts/combine_labels.py) to validate the resulting ground truth windows, specifically that they are distinct (unique, non-overlapping). """ with open(os.path.join(self.path)) as windowFile: windows = json.load(windowFile) self.windows = {} for relativePath in windows.keys(): self.windows[relativePath] = deepmap(strp, windows[relativePath]) if len(self.windows[relativePath]) == 0: continue num_windows = len(self.windows[relativePath]) if num_windows > 1: if not all([(self.windows[relativePath][i+1][0] - self.windows[relativePath][i][1]).total_seconds() >= 0 for i in xrange(num_windows-1)]): raise ValueError("In the label file %s, windows overlap." % self.path) def getLabels(self): """ Get Labels as a dictionary of key-value pairs of a relative path and its corresponding binary vector of anomaly labels. Labels are simply a more verbose version of the windows. """ self.labels = {} for relativePath, dataSet in self.corpus.dataFiles.iteritems(): if self.windows.has_key(relativePath): windows = self.windows[relativePath] labels = pandas.DataFrame({"timestamp": dataSet.data["timestamp"]}) labels['label'] = 0 for t1, t2 in windows: moreThanT1 = labels[labels["timestamp"] >= t1] betweenT1AndT2 = moreThanT1[moreThanT1["timestamp"] <= t2] indices = betweenT1AndT2.loc[:,"label"].index labels["label"].values[indices.values] = 1 self.labels[relativePath] = labels else: print "Warning: no label for datafile",relativePath class LabelCombiner(object): """ This class is used to combine labels from multiple human labelers, and the set of manual labels (known anomalies). The output is a single ground truth label file containing anomalies where there is enough human agreement. The class also computes the window around each anomaly. The exact logic is described elsewhere in the NAB documentation. """ def __init__(self, labelDir, corpus, threshold, windowSize, probationaryPercent, verbosity): """ @param labelDir (string) A directory name containing user label files. This directory should contain one label file per human labeler. @param corpus (Corpus) Instance of Corpus class. @param threshold (float) A percentage between 0 and 1, specifying the agreement threshold. It describes the level of agreement needed between individual labelers before a particular point in a data file is labeled as anomalous in the combined file. @param windowSize (float) Estimated size of an anomaly window, as a ratio the dataset length. @param verbosity (int) 0, 1, or 2 to print out select labeling metrics; 0 is none, 2 is the most. 
""" self.labelDir = labelDir self.corpus = corpus self.threshold = threshold self.windowSize = windowSize self.probationaryPercent = probationaryPercent self.verbosity = verbosity self.userLabels = None self.nLabelers = None self.knownLabels = None self.combinedWindows = None def __str__(self): ans = "" ans += "labelDir: %s\n" % self.labelDir ans += "corpus: %s\n" % self.corpus ans += "number of labelers: %d\n" % self.nLabelers ans += "agreement threshold: %d\n" % self.threshold return ans def write(self, labelsPath, windowsPath): """Write the combined labels and windows to destination directories.""" if not os.path.isdir(labelsPath): createPath(labelsPath) if not os.path.isdir(windowsPath): createPath(windowsPath) writeJSON(labelsPath, self.labelTimestamps) writeJSON(windowsPath, self.combinedWindows) def combine(self): """Combine raw and known labels in anomaly windows.""" self.getRawLabels() self.combineLabels() self.editPoorLabels() self.applyWindows() self.checkWindows() def getRawLabels(self): """Collect the raw user labels from specified directory.""" labelPaths = absoluteFilePaths(self.labelDir) self.userLabels = [] self.knownLabels = [] for path in labelPaths: if "known" in path: self.knownLabels.append(CorpusLabel(path, self.corpus)) else: self.userLabels.append(CorpusLabel(path, self.corpus)) self.nLabelers = len(self.userLabels) if self.nLabelers == 0: raise ValueError("No users labels found") def combineLabels(self): """ Combines raw user labels to create set of true anomaly labels. A buffer is used to bucket labels that identify the same anomaly. The buffer is half the estimated window size of an anomaly -- approximates an average of two anomalies per dataset, and no window can have > 1 anomaly. After bucketing, a label becomes a true anomaly if it was labeled by a proportion of the users greater than the defined threshold. Then the bucket is merged into one timestamp -- the ground truth label. The set of known anomaly labels are added as well. These have been manually labeled because we know the direct causes of the anomalies. They are added as if they are the result of the bucket-merge process. If verbosity > 0, the dictionary passedLabels -- the raw labels that did not pass the threshold qualification -- is printed to the console. """ def setTruthLabels(dataSet, trueAnomalies): """Returns the indices of the ground truth anomalies for a data file.""" timestamps = dataSet.data["timestamp"] labels = numpy.array(timestamps.isin(trueAnomalies), dtype=int) return [i for i in range(len(labels)) if labels[i]==1] self.labelTimestamps = {} self.labelIndices = {} for relativePath, dataSet in self.corpus.dataFiles.iteritems(): if ("Known" in relativePath) or ("artificial" in relativePath): knownAnomalies = self.knownLabels[0].windows[relativePath] self.labelTimestamps[relativePath] = [str(t) for t in knownAnomalies] self.labelIndices[relativePath] = setTruthLabels(dataSet, knownAnomalies) continue # Calculate the window buffer -- used for bucketing labels identifying # the same anomaly. 
granularity = dataSet.data["timestamp"][1] - dataSet.data["timestamp"][0] buffer = datetime.timedelta(minutes= granularity.total_seconds()/60 * len(dataSet.data) * self.windowSize/10) rawTimesLists = [] userCount = 0 for user in self.userLabels: if relativePath in user.windows: # the user has labels for this file checkForOverlap( user.windows[relativePath], buffer, user.path, relativePath) rawTimesLists.append(user.windows[relativePath]) userCount += 1 if not rawTimesLists: # no labeled anomalies for this data file self.labelTimestamps[relativePath] = [] self.labelIndices[relativePath] = setTruthLabels(dataSet, []) continue else: rawTimes = list(itertools.chain.from_iterable(rawTimesLists)) rawTimes.sort() # Bucket and merge the anomaly timestamps. threshold = userCount * self.threshold trueAnomalies, passedAnomalies = merge( bucket(rawTimes, buffer), threshold) self.labelTimestamps[relativePath] = [str(t) for t in trueAnomalies] self.labelIndices[relativePath] = setTruthLabels(dataSet, trueAnomalies) if self.verbosity>0: print "----" print "For %s the passed raw labels and qualified true labels are,"\ " respectively:" % relativePath print passedAnomalies print trueAnomalies return self.labelTimestamps, self.labelIndices def editPoorLabels(self): """ This edits labels that have been flagged for manual revision. From inspecting the data and anomaly windows, we have determined some combined labels should be revised, or not included in the ground truth labels. """ count = 0 for relativePath, indices in self.labelIndices.iteritems(): if "iio_us-east-1_i-a2eb1cd9_NetworkIn" in relativePath: self.labelIndices[relativePath] = [249, 339] count += len(indices) if self.verbosity > 0: print "=============================================================" print "Total ground truth anomalies in benchmark dataset =", count def applyWindows(self): """ This takes all the true anomalies, as calculated by combineLabels(), and adds a standard window. The window length is the class variable windowSize, and the location is centered on the anomaly timestamp. If verbosity = 2, the window metrics are printed to the console. """ allWindows = {} for relativePath, anomalies in self.labelIndices.iteritems(): data = self.corpus.dataFiles[relativePath].data length = len(data) num = len(anomalies) if num: windowLength = int(self.windowSize * length / len(anomalies)) else: windowLength = int(self.windowSize * length) if self.verbosity==2: print "----" print "Window metrics for file", relativePath print "file length =", length, ";" \ "number of windows =", num, ";" \ "window length =", windowLength windows = [] for a in anomalies: front = max(a - windowLength/2, 0) back = min(a + windowLength/2, length-1) windowLimit = [strf(data["timestamp"][front]), strf(data["timestamp"][back])] windows.append(windowLimit) allWindows[relativePath] = windows self.combinedWindows = allWindows def checkWindows(self): """ This takes the anomaly windows and checks for overlap with both each other and with the probationary period. Overlapping windows are merged into a single window. Windows overlapping with the probationary period are deleted. 
""" for relativePath, windows in self.combinedWindows.iteritems(): numWindows = len(windows) if numWindows > 0: fileLength = self.corpus.dataFiles[relativePath].data.shape[0] probationIndex = getProbationPeriod( self.probationaryPercent, fileLength) probationTimestamp = self.corpus.dataFiles[relativePath].data[ "timestamp"][probationIndex] if (pandas.to_datetime(windows[0][0]) -probationTimestamp).total_seconds() < 0: del windows[0] print ("The first window in {} overlaps with the probationary period " ", so we're deleting it.".format(relativePath)) i = 0 while len(windows)-1 > i: if (pandas.to_datetime(windows[i+1][0]) - pandas.to_datetime(windows[i][1])).total_seconds() <= 0: # merge windows windows[i] = [windows[i][0], windows[i+1][1]] del windows[i+1] i += 1
license: agpl-3.0

repo_name: pvcrossi/OnlineCS
path: online_CS.py
copies: 1
size: 4043
content:
''' Bayesian Online Compressed Sensing (2016) Paulo V. Rossi & Yoshiyuki Kabashima ''' from collections import namedtuple import matplotlib.pyplot as plt import numpy as np from numpy.linalg import norm from numpy.random import normal from utils import DlnH, DDlnH, G, H, moments def simulation(method='standard'): signal_length = 2000 alpha_max = 20 sigma_n_2 = 1e-1 phi = prior() P = posterior(signal_length, phi) x0 = generate_signal(signal_length, phi) print('Simulation parameters:') print('N='+str(signal_length)+', sparsity='+str(phi.rho)+ ', noise='+str(sigma_n_2)+', alpha_max='+str(alpha_max)) print('Measurement model: '+method+'\n') number_of_measurements = alpha_max*signal_length mean_square_error = np.zeros(number_of_measurements) for measurement in range(number_of_measurements): P = update_posterior(P, phi, x0, signal_length, sigma_n_2, method) mean_square_error[measurement] = reconstruction_error(P, x0) plot_results(P, x0, mean_square_error, phi) def prior(): phi = namedtuple('prior_distribution', ['rho', 'sigma_x_2', 'bar_x']) phi.rho = 0.1 phi.sigma_x_2 = 1. phi.bar_x = 0. return phi def posterior(signal_length, phi): P = namedtuple('posterior_distribution', ['m', 'v', 'a', 'h']) P.m = np.zeros(signal_length) P.v = phi.rho * phi.sigma_x_2 * np.ones(signal_length) P.a = np.zeros(signal_length) P.h = np.zeros(signal_length) return P def generate_signal (signal_length, phi): x0 = np.zeros(signal_length) number_of_non_zero_components = int(np.ceil(signal_length*phi.rho)) x0[:number_of_non_zero_components] = normal(loc=phi.bar_x, scale=np.sqrt(phi.sigma_x_2), size=number_of_non_zero_components) return x0 def update_posterior(P, phi, x0, signal_length, sigma_n_2, method): A_t = measurement_vector(signal_length) P.a, P.h = update_and_project(method, A_t, x0, sigma_n_2, P) P.m, P.v = moments(P, phi) return P def measurement_vector(signal_length): A_t = normal(size=signal_length) return A_t/norm(A_t) def update_and_project(method, A_t, x0, sigma_n_2, P): m, v, a, h = P.m, P.v, P.a, P.h u0 = np.dot(A_t, x0) if sigma_n_2 > 0: noise = normal(scale=np.sqrt(sigma_n_2)) else: noise = 0 y = u0 + noise Delta = np.dot(A_t, m) chi = np.dot(A_t**2, v) if method == 'standard': da, dh = update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m) elif method == '1bit': da, dh = update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m) else: raise ValueError('Measurement model not recognized. Please use "standard" or "1bit".') return a+da, h+dh def update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m): da = A_t**2 / (sigma_n_2 + chi) dh = (y-Delta)*A_t / (sigma_n_2 + chi) + da*m return da, dh def update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m): y = np.sign(y) u = y * np.dot(A_t, m) chi_prime = chi + sigma_n_2 z = -u/np.sqrt(chi_prime) da = -A_t**2/chi_prime * DDlnH(z) dh = -y*A_t/np.sqrt(chi_prime) * DlnH(z) + da*m return da, dh def reconstruction_error(P, x0): return norm(x0 - P.m)**2 / norm(x0)**2 def plot_results(P, x0, mse_t, phi): plt.subplots(figsize=(10,20)) plt.subplot(211) plt.plot(np.arange(len(mse_t))/float(len(P.m)), 10*np.log10(mse_t), color='k') plt.xlabel(r'$\alpha$') plt.ylabel(r'mse (dB)') plt.subplot(212) plt.plot(P.m, color='k', lw = 0.7, label=r'$m$') plt.scatter(range(int(len(x0)*phi.rho)), x0[:int(len(x0)*phi.rho)], \ marker='o', facecolors='none', edgecolors='r', lw=1.5, label=r'$x^0$') plt.xlim([0,len(P.m)]) plt.xlabel(r'Vector Component') plt.legend() plt.show() if __name__ == '__main__': simulation(method='1bit') #simulation(method='standard')
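For reference, the heart of update_and_project_std above is a single rank-one Bayesian update of the natural parameters (a, h). Below is a minimal sketch of one such measurement update with made-up dimensions; the prior projection performed by moments() in the original code is omitted.

import numpy as np

rng = np.random.default_rng(0)
N, sigma_n_2 = 8, 1e-1
x0 = rng.normal(size=N)                  # toy signal
m, v = np.zeros(N), np.ones(N)           # current posterior mean / variance
a, h = np.zeros(N), np.zeros(N)          # accumulated natural parameters

A_t = rng.normal(size=N)
A_t /= np.linalg.norm(A_t)               # unit-norm measurement vector
y = A_t @ x0 + rng.normal(scale=np.sqrt(sigma_n_2))

Delta = A_t @ m                          # predicted measurement mean
chi = (A_t ** 2) @ v                     # predicted measurement variance
da = A_t ** 2 / (sigma_n_2 + chi)        # precision increment
dh = (y - Delta) * A_t / (sigma_n_2 + chi) + da * m
a, h = a + da, h + dh
print(a[:3], h[:3])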
license: mit

repo_name: waynenilsen/statsmodels
path: statsmodels/sandbox/examples/ex_kaplan_meier.py
copies: 33
size: 2838
content:
#An example for the Kaplan-Meier estimator from __future__ import print_function from statsmodels.compat.python import lrange import statsmodels.api as sm import matplotlib.pyplot as plt import numpy as np from statsmodels.sandbox.survival2 import KaplanMeier #Getting the strike data as an array dta = sm.datasets.strikes.load() print('basic data') print('\n') dta = list(dta.values()[-1]) print(dta[lrange(5),:]) print('\n') #Create the KaplanMeier object and fit the model km = KaplanMeier(dta,0) km.fit() #show the results km.plot() print('basic model') print('\n') km.summary() print('\n') #Mutiple survival curves km2 = KaplanMeier(dta,0,exog=1) km2.fit() print('more than one curve') print('\n') km2.summary() print('\n') km2.plot() #with censoring censoring = np.ones_like(dta[:,0]) censoring[dta[:,0] > 80] = 0 dta = np.c_[dta,censoring] print('with censoring') print('\n') print(dta[lrange(5),:]) print('\n') km3 = KaplanMeier(dta,0,exog=1,censoring=2) km3.fit() km3.summary() print('\n') km3.plot() #Test for difference of survival curves log_rank = km3.test_diff([0.0645,-0.03957]) print('log rank test') print('\n') print(log_rank) print('\n') #The zeroth element of log_rank is the chi-square test statistic #for the difference between the survival curves for exog = 0.0645 #and exog = -0.03957, the index one element is the degrees of freedom for #the test, and the index two element is the p-value for the test wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1) print('Wilcoxon') print('\n') print(wilcoxon) print('\n') #Same info as log_rank, but for Peto and Peto modification to the #Gehan-Wilcoxon test #User specified functions for tests #A wider range of rates can be accessed by using the 'weight' parameter #for the test_diff method #For example, if the desire weights are S(t)*(1-S(t)), where S(t) is a pooled #estimate for the survival function, this could be computed by doing def weights(t): #must accept one arguement, even though it is not used here s = KaplanMeier(dta,0,censoring=2) s.fit() s = s.results[0][0] s = s * (1 - s) return s #KaplanMeier provides an array of times to the weighting function #internally, so the weighting function must accept one arguement test = km3.test_diff([0.0645,-0.03957], weight=weights) print('user specified weights') print('\n') print(test) print('\n') #Groups with nan names #These can be handled by passing the data to KaplanMeier as an array of strings groups = np.ones_like(dta[:,1]) groups = groups.astype('S4') groups[dta[:,1] > 0] = 'high' groups[dta[:,1] <= 0] = 'low' dta = dta.astype('S4') dta[:,1] = groups print('with nan group names') print('\n') print(dta[lrange(5),:]) print('\n') km4 = KaplanMeier(dta,0,exog=1,censoring=2) km4.fit() km4.summary() print('\n') km4.plot() #show all the plots plt.show()
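Since the sandbox KaplanMeier class used above may not be available in newer statsmodels releases, here is a minimal numpy sketch of the product-limit estimator itself, on invented durations and censoring flags:

import numpy as np

def kaplan_meier(durations, events):
    # S(t) is the product over event times t_i <= t of (1 - d_i / n_i),
    # where d_i = events at t_i and n_i = subjects still at risk.
    durations = np.asarray(durations, dtype=float)
    events = np.asarray(events, dtype=int)
    times, surv, s, n = [], [], 1.0, len(durations)
    for t in np.unique(durations):
        at_t = durations == t
        d = events[at_t].sum()           # observed events at this time
        if d:
            s *= 1.0 - d / n
            times.append(t)
            surv.append(s)
        n -= at_t.sum()                  # failures and censorings leave the risk set
    return np.array(times), np.array(surv)

t = [2, 4, 4, 5, 6, 6]
e = [1, 1, 0, 1, 0, 1]                   # 1 = event observed, 0 = censored
print(kaplan_meier(t, e))                # ([2, 4, 5, 6], [0.833, 0.667, 0.444, 0.222])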
license: bsd-3-clause

repo_name: cemarchi/biosphere
path: Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py
copies: 1
size: 4546
content:
import math import statistics from itertools import groupby from random import randint from typing import Dict, Tuple, Counter import pandas as pd from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \ IntermediateRepresentationGeneratorBase from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \ SampleTransformerBase from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto class MicroRnaToGeneTransformer(SampleTransformerBase): """ """ def __init__(self, intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase, get_global_diff_values_action, get_mirna_gene_target_action): super().__init__(intermediateRepresentationGenerator) self.__get_mirna_gene_target_action = get_mirna_gene_target_action self.__get_global_diff_values_action = get_global_diff_values_action def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]: mirna_gene_targets = {mirna.lower(): g for mirna, g in self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()} mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets) id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items() for id_entrez in id_entrez_list])) measure_matrix = dict([(g, []) for g in id_entrez_list]) key_func = lambda gene: gene[0] for patient_id, exp_values in mirna_samples.items(): gene_values = [(id_entrez, exp_value) for mirna_symbol, exp_value in exp_values.items() for id_entrez in mirna_gene_targets[mirna_symbol]] gene_values = sorted(gene_values, key=key_func) for id_entrez, measures in groupby(gene_values, key_func): measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)] measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures)) gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all') gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all') return gene_matrix, \ self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant) def __get_mirna_gene_targets(self, mirnas): gene_targets = {} fe_target = self.__get_mirna_gene_target_action(mirnas) gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes))) if t.microrna_symbol in gene_targets else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list])) return gene_targets def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets): from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']] from_sample_matrix.set_index("patient_id", drop=True, inplace=True) return from_sample_matrix.to_dict(orient="index") def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant): diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values if diff.element_id in mirna_gene_targets] genes_status = [(g, diff.status) for diff in diff_mirna for g in mirna_gene_targets[diff.element_id] if g in genes] key_func = lambda gene: gene[0] genes_status = sorted(genes_status, key=key_func) genes_status_dict = {} for id_entrez, status in groupby(genes_status, key_func): status = list(status) status_counter = Counter(status) status = 
[k for k, v in status_counter.most_common()] len_status = len(status) - 1 genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)] return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
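The essential transformation above, collapsing a patients-by-miRNA matrix into a patients-by-gene matrix by averaging each gene's targeting miRNAs while ignoring missing values, can be sketched directly in pandas. The sample names, target mapping, and Entrez IDs below are purely illustrative:

import numpy as np
import pandas as pd

# Illustrative miRNA expression matrix (patients x miRNAs) and target map.
samples = pd.DataFrame(
    {"hsa-mir-21": [1.0, 2.0, np.nan], "hsa-mir-155": [0.5, np.nan, 3.0]},
    index=["p1", "p2", "p3"])
targets = {"hsa-mir-21": [5728, 7157], "hsa-mir-155": [7157]}

# Long format: one row per (patient, target gene, measured value).
rows = [(p, gene, samples.loc[p, mirna])
        for mirna, genes in targets.items()
        for gene in genes
        for p in samples.index]
long = pd.DataFrame(rows, columns=["patient_id", "id_entrez", "value"])

# Gene-level matrix: mean over all miRNAs targeting each gene, NaNs skipped.
gene_matrix = long.pivot_table(index="patient_id", columns="id_entrez",
                               values="value", aggfunc="mean")
print(gene_matrix)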
license: bsd-3-clause

repo_name: PatrickOReilly/scikit-learn
path: examples/plot_johnson_lindenstrauss_bound.py
copies: 67
size: 7474
content:
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
license: bsd-3-clause

repo_name: JT5D/scikit-learn
path: examples/plot_multilabel.py
copies: 9
size: 4299
content:
# Authors: Vlad Niculae, Mathieu Blondel # License: BSD 3 clause """ ========================= Multilabel classification ========================= This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles. The classification is performed by projecting to the first two principal components found by PCA and CCA for visualisation purposes, followed by using the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one. Note: in the plot, "unlabeled samples" does not mean that we don't know the labels (as in semi-supervised learning) but that the samples simply do *not* have a label. """ print(__doc__) import numpy as np import matplotlib.pylab as pl from sklearn.datasets import make_multilabel_classification from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.preprocessing import LabelBinarizer from sklearn.decomposition import PCA from sklearn.cross_decomposition import CCA def plot_hyperplane(clf, min_x, max_x, linestyle, label): # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough yy = a * xx - (clf.intercept_[0]) / w[1] pl.plot(xx, yy, linestyle, label=label) def plot_subfigure(X, Y, subplot, title, transform): if transform == "pca": X = PCA(n_components=2).fit_transform(X) elif transform == "cca": # Convert list of tuples to a class indicator matrix first Y_indicator = LabelBinarizer().fit(Y).transform(Y) X = CCA(n_components=2).fit(X, Y_indicator).transform(X) else: raise ValueError min_x = np.min(X[:, 0]) max_x = np.max(X[:, 0]) min_y = np.min(X[:, 1]) max_y = np.max(X[:, 1]) classif = OneVsRestClassifier(SVC(kernel='linear')) classif.fit(X, Y) pl.subplot(2, 2, subplot) pl.title(title) zero_class = np.where([0 in y for y in Y]) one_class = np.where([1 in y for y in Y]) pl.scatter(X[:, 0], X[:, 1], s=40, c='gray') pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b', facecolors='none', linewidths=2, label='Class 1') pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange', facecolors='none', linewidths=2, label='Class 2') plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--', 'Boundary\nfor class 1') plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.', 'Boundary\nfor class 2') pl.xticks(()) pl.yticks(()) pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x) pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y) if subplot == 2: pl.xlabel('First principal component') pl.ylabel('Second principal component') pl.legend(loc="upper left") pl.figure(figsize=(8, 6)) X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=True, random_state=1) plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", 
"pca") X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, random_state=1) plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca") pl.subplots_adjust(.04, .02, .97, .94, .09, .2) pl.show()
license: bsd-3-clause

repo_name: bgris/ODL_bgris
path: lib/python3.5/site-packages/odl/util/graphics.py
copies: 1
size: 15419
content:
# Copyright 2014-2016 The ODL development group # # This file is part of ODL. # # ODL is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ODL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ODL. If not, see <http://www.gnu.org/licenses/>. """Functions for graphical output.""" # Imports for common Python 2/3 codebase from __future__ import print_function, division, absolute_import from future import standard_library standard_library.install_aliases() import numpy as np from odl.util.testutils import run_doctests from odl.util.utility import is_real_dtype __all__ = ('show_discrete_data',) def _safe_minmax(values): """Calculate min and max of array with guards for nan and inf.""" # Nan and inf guarded min and max minval = np.min(values[np.isfinite(values)]) maxval = np.max(values[np.isfinite(values)]) return minval, maxval def _colorbar_ticks(minval, maxval): """Return the ticks (values show) in the colorbar.""" return [minval, (maxval + minval) / 2., maxval] def _digits(minval, maxval): """Digits needed to comforatbly display values in [minval, maxval]""" if minval == maxval: return 3 else: return min(10, max(2, int(1 + abs(np.log10(maxval - minval))))) def _colorbar_format(minval, maxval): """Return the format string for the colorbar.""" return '%.{}f'.format(_digits(minval, maxval)) def _axes_info(grid, npoints=5): result = [] min_pt = grid.min() max_pt = grid.max() for axis in range(grid.ndim): xmin = min_pt[axis] xmax = max_pt[axis] points = np.linspace(xmin, xmax, npoints) indices = np.linspace(0, grid.shape[axis] - 1, npoints, dtype=int) tick_values = grid.coord_vectors[axis][indices] # Do not use corner point in case of a partition, use outer corner tick_values[[0, -1]] = xmin, xmax format_str = '{:.' + str(_digits(xmin, xmax)) + 'f}' tick_labels = [format_str.format(f) for f in tick_values] result += [(points, tick_labels)] return result def show_discrete_data(values, grid, title=None, method='', force_show=False, fig=None, **kwargs): """Display a discrete 1d or 2d function. Parameters ---------- values : `numpy.ndarray` The values to visualize grid : `TensorGrid` or `RectPartition` Grid of the values title : string, optional Set the title of the figure method : string, optional 1d methods: 'plot' : graph plot 'scatter' : scattered 2d points (2nd axis <-> value) 2d methods: 'imshow' : image plot with coloring according to value, including a colorbar. 'scatter' : cloud of scattered 3d points (3rd axis <-> value) 'wireframe', 'plot_wireframe' : surface plot force_show : bool, optional Whether the plot should be forced to be shown now or deferred until later. Note that some backends always displays the plot, regardless of this value. fig : `matplotlib.figure.Figure`, optional The figure to show in. Expected to be of same "style", as the figure given by this function. The most common usecase is that fig is the return value from an earlier call to this function. Default: New figure interp : {'nearest', 'linear'}, optional Interpolation method to use. 
Default: 'nearest' axis_labels : string, optional Axis labels, default: ['x', 'y'] update_in_place : bool, optional Update the content of the figure in place. Intended for faster real time plotting, typically ~5 times faster. This is only performed for ``method == 'imshow'`` with real data and ``fig != None``. Otherwise this parameter is treated as False. Default: False axis_fontsize : int, optional Fontsize for the axes. Default: 16 kwargs : {'figsize', 'saveto', ...} Extra keyword arguments passed on to display method See the Matplotlib functions for documentation of extra options. Returns ------- fig : `matplotlib.figure.Figure` The resulting figure. It is also shown to the user. See Also -------- matplotlib.pyplot.plot : Show graph plot matplotlib.pyplot.imshow : Show data as image matplotlib.pyplot.scatter : Show scattered 3d points """ # Importing pyplot takes ~2 sec, only import when needed. import matplotlib.pyplot as plt args_re = [] args_im = [] dsp_kwargs = {} sub_kwargs = {} arrange_subplots = (121, 122) # horzontal arrangement # Create axis labels which remember their original meaning axis_labels = kwargs.pop('axis_labels', ['x', 'y']) values_are_complex = not is_real_dtype(values.dtype) figsize = kwargs.pop('figsize', None) saveto = kwargs.pop('saveto', None) interp = kwargs.pop('interp', 'nearest') axis_fontsize = kwargs.pop('axis_fontsize', 16) # Check if we should and can update the plot in place update_in_place = kwargs.pop('update_in_place', False) if (update_in_place and (fig is None or values_are_complex or values.ndim != 2 or (values.ndim == 2 and method not in ('', 'imshow')))): update_in_place = False if values.ndim == 1: # TODO: maybe a plotter class would be better if not method: if interp == 'nearest': method = 'step' dsp_kwargs['where'] = 'mid' elif interp == 'linear': method = 'plot' else: method = 'plot' if method == 'plot' or method == 'step' or method == 'scatter': args_re += [grid.coord_vectors[0], values.real] args_im += [grid.coord_vectors[0], values.imag] else: raise ValueError('`method` {!r} not supported' ''.format(method)) elif values.ndim == 2: if not method: method = 'imshow' if method == 'imshow': args_re = [np.rot90(values.real)] args_im = [np.rot90(values.imag)] if values_are_complex else [] extent = [grid.min()[0], grid.max()[0], grid.min()[1], grid.max()[1]] if interp == 'nearest': interpolation = 'nearest' elif interp == 'linear': interpolation = 'bilinear' else: interpolation = 'none' dsp_kwargs.update({'interpolation': interpolation, 'cmap': 'bone', 'extent': extent, 'aspect': 'auto'}) elif method == 'scatter': pts = grid.points() args_re = [pts[:, 0], pts[:, 1], values.ravel().real] args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) elif method in ('wireframe', 'plot_wireframe'): method = 'plot_wireframe' x, y = grid.meshgrid args_re = [x, y, np.rot90(values.real)] args_im = ([x, y, np.rot90(values.imag)] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) else: raise ValueError('`method` {!r} not supported' ''.format(method)) else: raise NotImplementedError('no method for {}d display implemented' ''.format(values.ndim)) # Additional keyword args are passed on to the display method dsp_kwargs.update(**kwargs) if fig is not None: # Reuse figure if given as input if not isinstance(fig, plt.Figure): raise TypeError('`fig` {} not a matplotlib figure'.format(fig)) if not plt.fignum_exists(fig.number): # If figure does not exist, user either closed the 
figure or # is using IPython, in this case we need a new figure. fig = plt.figure(figsize=figsize) updatefig = False else: # Set current figure to given input fig = plt.figure(fig.number) updatefig = True if values.ndim > 1 and not update_in_place: # If the figure is larger than 1d, we can clear it since we # dont reuse anything. Keeping it causes performance problems. fig.clf() else: fig = plt.figure(figsize=figsize) updatefig = False if values_are_complex: # Real if len(fig.axes) == 0: # Create new axis if needed sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs) sub_re.set_title('Real part') sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_re.set_ylabel('value') else: sub_re = fig.axes[0] display_re = getattr(sub_re, method) csub_re = display_re(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 2: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_re, maxval_re = _safe_minmax(values.real) else: minval_re, maxval_re = kwargs['clim'] ticks_re = _colorbar_ticks(minval_re, maxval_re) format_re = _colorbar_format(minval_re, maxval_re) plt.colorbar(csub_re, orientation='horizontal', ticks=ticks_re, format=format_re) # Imaginary if len(fig.axes) < 3: sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs) sub_im.set_title('Imaginary part') sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_im.set_ylabel('value') else: sub_im = fig.axes[2] display_im = getattr(sub_im, method) csub_im = display_im(*args_im, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 4: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_im, maxval_im = _safe_minmax(values.imag) else: minval_im, maxval_im = kwargs['clim'] ticks_im = _colorbar_ticks(minval_im, maxval_im) format_im = _colorbar_format(minval_im, maxval_im) plt.colorbar(csub_im, orientation='horizontal', ticks=ticks_im, format=format_im) else: if len(fig.axes) == 0: # Create new axis object if needed sub = plt.subplot(111, **sub_kwargs) sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub.set_ylabel('value') try: # For 3d plots sub.set_zlabel('z') except AttributeError: pass else: sub = fig.axes[0] if update_in_place: import matplotlib as mpl imgs = [obj for obj in sub.get_children() if isinstance(obj, mpl.image.AxesImage)] if len(imgs) > 0 and updatefig: imgs[0].set_data(args_re[0]) csub = imgs[0] # Update min-max if 'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] csub.set_clim(minval, maxval) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow': # Add colorbar # Use clim from kwargs if given if 
'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] ticks = _colorbar_ticks(minval, maxval) format = _colorbar_format(minval, maxval) if len(fig.axes) < 2: # Create colorbar if none seems to exist plt.colorbar(mappable=csub, ticks=ticks, format=format) elif update_in_place: # If it exists and we should update it csub.colorbar.set_clim(minval, maxval) csub.colorbar.set_ticks(ticks) csub.colorbar.set_ticklabels([format % tick for tick in ticks]) csub.colorbar.draw_all() # Fixes overlapping stuff at the expense of potentially squashed subplots if not update_in_place: fig.tight_layout() if title is not None: if not values_are_complex: # Do not overwrite title for complex values plt.title(title) fig.canvas.manager.set_window_title(title) if updatefig or plt.isinteractive(): # If we are running in interactive mode, we can always show the fig # This causes an artifact, where users of `CallbackShow` without # interactive mode only shows the figure after the second iteration. plt.show(block=False) if not update_in_place: plt.draw() plt.pause(0.0001) else: try: sub.draw_artist(csub) fig.canvas.blit(fig.bbox) fig.canvas.update() fig.canvas.flush_events() except AttributeError: plt.draw() plt.pause(0.0001) if force_show: plt.show() if saveto is not None: fig.savefig(saveto) return fig if __name__ == '__main__': run_doctests()
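The colorbar helpers in this module are small enough to exercise on their own. The following is a minimal, self-contained sketch (plain NumPy, no ODL or matplotlib required) that mirrors `_safe_minmax`, `_digits`, `_colorbar_ticks` and `_colorbar_format` to show how tick values and the label format are derived from the finite data range; the input array is made up for illustration.

import numpy as np

def safe_minmax(values):
    # Min/max over finite entries only, mirroring _safe_minmax above.
    finite = values[np.isfinite(values)]
    return np.min(finite), np.max(finite)

def colorbar_ticks(minval, maxval):
    # Three ticks: bottom, midpoint, top.
    return [minval, (maxval + minval) / 2.0, maxval]

def digits(minval, maxval):
    # Number of decimals needed to display values in [minval, maxval].
    if minval == maxval:
        return 3
    return min(10, max(2, int(1 + abs(np.log10(maxval - minval)))))

def colorbar_format(minval, maxval):
    return '%.{}f'.format(digits(minval, maxval))

# Hypothetical data containing a NaN and an inf that must be ignored.
values = np.array([0.12, 0.34, np.nan, 0.56, np.inf, 0.9])
lo, hi = safe_minmax(values)
fmt = colorbar_format(lo, hi)
print([fmt % t for t in colorbar_ticks(lo, hi)])  # ['0.12', '0.51', '0.90']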
gpl-3.0
mjgrav2001/scikit-learn
sklearn/neighbors/graph.py
208
7031
"""Nearest Neighbors graph functions""" # Author: Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import warnings from .base import KNeighborsMixin, RadiusNeighborsMixin from .unsupervised import NearestNeighbors def _check_params(X, metric, p, metric_params): """Check the validity of the input parameters""" params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params]) est_params = X.get_params() for param_name, func_param in params: if func_param != est_params[param_name]: raise ValueError( "Got %s for %s, while the estimator has %s for " "the same parameter." % ( func_param, param_name, est_params[param_name])) def _query_include_self(X, include_self, mode): """Return the query based on include_self param""" # Done to preserve backward compatibility. if include_self is None: if mode == "connectivity": warnings.warn( "The behavior of 'kneighbors_graph' when mode='connectivity' " "will change in version 0.18. Presently, the nearest neighbor " "of each sample is the sample itself. Beginning in version " "0.18, the default behavior will be to exclude each sample " "from being its own nearest neighbor. To maintain the current " "behavior, set include_self=True.", DeprecationWarning) include_self = True else: include_self = False if include_self: query = X._fit_X else: query = None return query def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=None): """Computes the (weighted) graph of k-Neighbors for points in X Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. metric : string, default 'minkowski' The distance metric used to calculate the k-Neighbors for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the p param equal to 2.) include_self: bool, default backward-compatible. Whether or not to mark each sample as the first nearest neighbor to itself. If `None`, then True is used for mode='connectivity' and False for mode='distance' as this will preserve backwards compatibilty. From version 0.18, the default value will be False, irrespective of the value of `mode`. p : int, default 2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional additional keyword arguments for the metric function. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. 
Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 1.], [ 1., 0., 1.]]) See also -------- radius_neighbors_graph """ if not isinstance(X, KNeighborsMixin): X = NearestNeighbors(n_neighbors, metric=metric, p=p, metric_params=metric_params).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X, include_self, mode) return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=None): """Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. radius : float Radius of neighborhoods. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. metric : string, default 'minkowski' The distance metric used to calculate the neighbors within a given radius for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the param equal to 2.) include_self: bool, default None Whether or not to mark each sample as the first nearest neighbor to itself. If `None`, then True is used for mode='connectivity' and False for mode='distance' as this will preserve backwards compatibilty. From version 0.18, the default value will be False, irrespective of the value of `mode`. p : int, default 2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional additional keyword arguments for the metric function. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import radius_neighbors_graph >>> A = radius_neighbors_graph(X, 1.5) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 0.], [ 1., 0., 1.]]) See also -------- kneighbors_graph """ if not isinstance(X, RadiusNeighborsMixin): X = NearestNeighbors(radius=radius, metric=metric, p=p, metric_params=metric_params).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X, include_self, mode) return X.radius_neighbors_graph(query, radius, mode)
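A short usage sketch for the two public functions defined above. The toy points are the same as in the docstrings; `include_self` is passed explicitly to avoid the deprecation warning discussed there, and the connectivity output then matches the doctest example.

import numpy as np
from sklearn.neighbors import kneighbors_graph, radius_neighbors_graph

X = [[0], [3], [1]]

# Binary adjacency: each sample connected to its 2 nearest neighbours,
# counting itself because include_self=True (the pre-0.18 behaviour).
A = kneighbors_graph(X, n_neighbors=2, mode='connectivity', include_self=True)
print(A.toarray())

# Weighted adjacency: edges within radius 1.5 carry the actual distances.
D = radius_neighbors_graph(X, radius=1.5, mode='distance', include_self=False)
print(D.toarray())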
bsd-3-clause
stephenliu1989/HK_DataMiner
hkdataminer/cluster/faiss_dbscan_.py
1
14197
# -*- coding: utf-8 -*- """ DBSCAN Acclerated by Facebook AI Faiss DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <robertlayton@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Lars Buitinck # # License: BSD 3 clause import numpy as np import time from scipy import sparse from numba import autojit import numba from sklearn.base import BaseEstimator, ClusterMixin from sklearn.utils import check_array, check_consistent_length #from sklearn.neighbors import NearestNeighbors from sklearn.cluster._dbscan_inner import dbscan_inner import faiss @autojit def get_neighborhoods(D, I, eps): neighborhoods = [] for i in range(len(D)): distances = D[i] #print(distances) distances = np.delete(distances, 0) indices = I[i] indices = np.delete(indices, 0) #print(indices) index = indices[distances <= eps] neighborhoods.append(index) #neighborhoods = np.asarray(neighborhoods) #np.savetxt('faiss_neighborhoods', np.asarray(neighborhoods), fmt='%s') return np.asarray(neighborhoods) def cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True): dimension = X.shape[1] if IVFFlat is True: quantizer = faiss.IndexFlatL2(dimension) index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2) # here we specify METRIC_L2, by default it performs inner-product search assert not index_cpu.is_trained index_cpu.train(X) assert index_cpu.is_trained # here we specify METRIC_L2, by default it performs inner-product search else: index_cpu = faiss.IndexFlatL2(dimension) index_cpu.add(X) n_samples = 1000 k = min_samples samples = np.random.choice(len(X), n_samples) # print(samples) D, I = index_cpu.search(X[samples], k) # sanity check while np.min(np.amax(D, axis=1)) < eps: k = k * 2 # D, I = index_gpu.search(X[samples], k) #print(np.min(np.amax(D, axis=1)), eps, k) D, I = index_cpu.search(X[samples], k) if k > 1024: k = 1000 #print(np.max(D[:, k - 1]), k, eps) index_cpu.nprobe = nprobe D, I = index_cpu.search(X, k) # actual search return get_neighborhoods(D, I, eps) def gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True): dimension = X.shape[1] if IVFFlat is True: quantizer = faiss.IndexFlatL2(dimension) index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2) # here we specify METRIC_L2, by default it performs inner-product search res = faiss.StandardGpuResources() # use a single GPU flat_config = faiss.GpuIndexFlatConfig() flat_config.device = 0 # make it an IVF GPU index index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu) assert not index_gpu.is_trained index_gpu.train(X) assert index_gpu.is_trained # here we specify METRIC_L2, by default it performs inner-product search else: index_cpu = faiss.IndexFlatL2(dimension) res = faiss.StandardGpuResources() flat_config = faiss.GpuIndexFlatConfig() flat_config.device = 0 index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu) index_gpu.add(X) n_samples = 1000 k = min_samples samples = np.random.choice(len(X), n_samples) # print(samples) D, I = index_gpu.search(X[samples], k) # sanity check while np.max(D[:, k - 1]) < eps: k = k * 2 D, I = index_gpu.search(X[samples], k) #print(np.max(D[:, k - 1]), k, eps) index_gpu.nprobe = nprobe D, I = index_gpu.search(X, k) # actual search return get_neighborhoods(D, I, eps) def faiss_dbscan(X, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', metric_params=None, algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1, GPU=False, IVFFlat=True): """Perform DBSCAN 
clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. metric_params : dict, optional Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- core_samples : array [n_core_samples] Indices of core samples. labels : array [n_samples] Cluster labels for each point. Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ if not eps > 0.0: raise ValueError("eps must be positive.") if sample_weight is not None: sample_weight = np.asarray(sample_weight) check_consistent_length(X, sample_weight) # Calculate neighborhood for all samples. This leaves the original point # in, which needs to be considered later (i.e. point i is in the # neighborhood of point i. 
While True, its useless information) if GPU is True: neighborhoods = gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat) else: neighborhoods = cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat) if sample_weight is None: n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) else: n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]) # Initially, all samples are noise. labels = -np.ones(X.shape[0], dtype=np.intp) # A list of all core samples found. core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8) dbscan_inner(core_samples, neighborhoods, labels) return np.where(core_samples)[0], labels class Faiss_DBSCAN(BaseEstimator, ClusterMixin): """Perform DBSCAN clustering from vector array or distance matrix. DBSCAN - Density-Based Spatial Clustering of Applications with Noise. Finds core samples of high density and expands clusters from them. Good for data which contains clusters of similar density. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.calculate_distance for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, optional Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- core_sample_indices_ : array, shape = [n_core_samples] Indices of core samples. components_ : array, shape = [n_core_samples, n_features] Copy of each core sample found by training. labels_ : array, shape = [n_samples] Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. 
References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ def __init__(self, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', n_jobs=1, GPU=False, IVFFlat=True): self.eps = eps self.min_samples = min_samples self.metric = metric self.n_jobs = n_jobs self.GPU = GPU self.IVFFlat = IVFFlat self.nlist = nlist self.nprobe = nprobe def fit(self, X, y=None, sample_weight=None): """Perform DBSCAN clustering from features or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. """ #if metric is not "rmsd": # X = check_array(X, accept_sparse='csr') #t0 = time.time() clust = faiss_dbscan(X, eps=self.eps, min_samples=self.min_samples, nlist=self.nlist, nprobe=self.nprobe, sample_weight=sample_weight, GPU=self.GPU, IVFFlat=self.IVFFlat) #t1 = time.time() #print("Faiss DBSCAN clustering Time Cost:", t1 - t0) self.core_sample_indices_, self.labels_ = clust if len(self.core_sample_indices_): # fix for scipy sparse indexing issue self.components_ = X[self.core_sample_indices_].copy() else: # no core samples self.components_ = np.empty((0, X.shape[1])) return self
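The key idea above is emulating a radius query with a k-nearest-neighbour search: fetch k neighbours per point, keep only those within eps, and keep doubling k while the k-th distance on a sample of points is still inside eps. Below is a NumPy-only sketch of the filtering step performed by `get_neighborhoods` (brute-force distances stand in for the Faiss index, so it runs without faiss installed); the data and parameter values are illustrative.

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((200, 2)).astype('float32')
eps, k = 0.1, 8

# Brute-force L2 distances stand in for index.search(X, k).
d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
order = np.argsort(d2, axis=1)[:, :k]                    # k nearest indices, self first
dist = np.sqrt(np.take_along_axis(d2, order, axis=1))    # matching distances

# Same post-processing as get_neighborhoods: drop the self column,
# keep only the neighbours that fall within eps.
neighborhoods = [idx[1:][d[1:] <= eps] for idx, d in zip(order, dist)]
print(neighborhoods[0])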
apache-2.0
hainm/scikit-learn
examples/cluster/plot_kmeans_assumptions.py
270
2040
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means will produce unintuitive and possibly unexpected clusters. In the first three plots, the input data does not conform to some implicit assumption that k-means makes and undesirable clusters are produced as a result. In the last plot, k-means returns intuitive clusters despite unevenly sized blobs. """ print(__doc__) # Author: Phil Roth <mr.phil.roth@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") plt.show()
bsd-3-clause
Healthcast/RSV
python/all_year_predict/methods.py
2
3879
#!/usr/bin/pyhton import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, neighbors, linear_model from sklearn import svm from sklearn import metrics from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier def apply_algorithm(paras, X, y): if paras['clf'] == 'svm': clf = svm.SVC(kernel=paras['svm'][1], C=paras['svm'][0], probability=True) elif paras['clf'] == 'knn': clf = neighbors.KNeighborsClassifier(paras['knn'][0],\ weights=paras['knn'][1]) elif paras['clf'] == 'rf': clf = RandomForestClassifier(max_depth=paras['rf'][0], \ n_estimators=paras['rf'][1],\ max_features=paras['rf'][2]) else: print str("unknown classifier") sys.exit(2) return clf def apply_evaluation(paras, X, y, clf, data): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, \ random_state=0) clf.fit(X_train, y_train) r = clf.predict(X_test) d = clf.decision_function(X) p = clf.predict_proba(X).T[1]*3 h = data["hospital"].T[data["city"].index(paras["city"])] h1 = h.astype(float) m = max(h1) h1=h1/m*4 plt.figure() # plt.plot(d) plt.plot(y) plt.plot(h1) plt.plot(p) # height = 4 # bottom = -2 # ss = data["season_start"] # date=data["date1"] # c_id = data["city"].index(paras["city"]) # ylabel = data["ylabels"] # for m in ss: # plt.plot([m, m],[bottom, height], 'y--', linewidth=1) # # for m in range(1, len(ss)-1): # a = ss[m] # plt.text(a-5,height, date[a].split('-')[0]) # # #plot the start week # up=1 # for j in range(len(ylabel.T[c_id])-1): # if ylabel.T[c_id,j] == 1 : # plt.plot([j, j],[bottom, height], 'k-', linewidth=2) # if up==1: # plt.text(j-10, height-1, date[j]) # up=0 # else: # plt.text(j-10, height-2, date[j]) # up=1 # plt.show() #plot the results # x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1 # y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1 # # xx, yy = np.meshgrid(np.arange(x_min, x_max, 1), np.arange(y_min, y_max, 1)) # Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Z = Z.reshape(xx.shape) # # plt.figure() # plt.pcolormesh(xx, yy, Z) # plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train) # plt.xlim(xx.min(), xx.max()) # plt.ylim(yy.min(), yy.max()) # plt.title("binary classification classification") # plt.show() # if paras['eva'] == 'accuracy': print "The accuracy:" print metrics.accuracy_score(y_test, r) elif paras['eva'] == 'precision': print "The precision:" print metrics.precision_score(y_test, r) elif paras['eva'] == 'recall': print "The recall:" print metrics.recall_score(y_test, r) elif paras['eva'] == 'confusion': print "The confusion matrix:" print metrics.confusion_matrix(y_test, r) elif paras['eva'] == 'report': print "The report:" print metrics.classification_report(y_test, r) elif paras['eva'] == 'roc' and paras['clf'] == 'svm': scores = clf.decision_function(X_test) print "The auc:" fpr, tpr, thresholds = metrics.roc_curve(y_test, scores) roc_auc = metrics.auc(fpr, tpr) print str(roc_auc) plt.figure() plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], 'k--') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.show()
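The `paras` dictionary above acts as a small classifier factory plus evaluation switch. Below is a self-contained sketch of the same dispatch pattern on synthetic data; it uses the modern `sklearn.model_selection` import path rather than the deprecated `sklearn.cross_validation` used in the file, and the parameter values are purely illustrative.

from sklearn import svm, neighbors, metrics
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

def build_clf(paras):
    # Same dispatch idea as apply_algorithm above.
    if paras['clf'] == 'svm':
        return svm.SVC(kernel=paras['svm'][1], C=paras['svm'][0], probability=True)
    if paras['clf'] == 'knn':
        return neighbors.KNeighborsClassifier(paras['knn'][0], weights=paras['knn'][1])
    if paras['clf'] == 'rf':
        return RandomForestClassifier(max_depth=paras['rf'][0],
                                      n_estimators=paras['rf'][1],
                                      max_features=paras['rf'][2])
    raise ValueError('unknown classifier %r' % paras['clf'])

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

paras = {'clf': 'knn', 'knn': (5, 'distance'), 'eva': 'report'}
clf = build_clf(paras).fit(X_train, y_train)
print(metrics.classification_report(y_test, clf.predict(X_test)))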
gpl-2.0
sysid/kg
quora/Ensemble_CNN_TD_Quora.py
1
12948
# coding: utf-8 # In[1]: import pandas as pd import numpy as np import nltk from nltk.corpus import stopwords from nltk.stem import SnowballStemmer import re from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt # In[2]: train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv") # In[3]: train.head() # In[4]: test.head() # In[5]: print(train.shape) print(test.shape) # In[6]: print(train.isnull().sum()) print(test.isnull().sum()) # In[7]: train = train.fillna('empty') test = test.fillna('empty') # In[8]: print(train.isnull().sum()) print(test.isnull().sum()) # In[9]: test.head() # In[10]: for i in range(6): print(train.question1[i]) print(train.question2[i]) print() # In[17]: def text_to_wordlist(text, remove_stopwords=False, stem_words=False): # Clean the text, with the option to remove stopwords and to stem words. # Convert words to lower case and split them text = text.lower().split() # Optionally remove stop words (true by default) if remove_stopwords: stops = set(stopwords.words("english")) text = [w for w in text if not w in stops] text = " ".join(text) # Clean the text text = re.sub(r"[^A-Za-z0-9^,!.\'+-=]", " ", text) text = re.sub(r"\'s", " 's ", text) text = re.sub(r"\'ve", " have ", text) text = re.sub(r"can't", " cannot ", text) text = re.sub(r"n't", " not ", text) text = re.sub(r"\'re", " are ", text) text = re.sub(r"\'d", " would ", text) text = re.sub(r"\'ll", " will ", text) text = re.sub(r",", " ", text) text = re.sub(r"\.", " ", text) text = re.sub(r"!", " ! ", text) text = re.sub(r"\^", " ^ ", text) text = re.sub(r"\+", " + ", text) text = re.sub(r"\-", " - ", text) text = re.sub(r"\=", " = ", text) text = re.sub(r"\s{2,}", " ", text) # Shorten words to their stems if stem_words: text = text.split() stemmer = SnowballStemmer('english') stemmed_words = [stemmer.stem(word) for word in text] text = " ".join(stemmed_words) # Return a list of words return(text) # In[18]: def process_questions(question_list, questions, question_list_name, dataframe): # function to transform questions and display progress for question in questions: question_list.append(text_to_wordlist(question)) if len(question_list) % 100000 == 0: progress = len(question_list)/len(dataframe) * 100 print("{} is {}% complete.".format(question_list_name, round(progress, 1))) # In[19]: train_question1 = [] process_questions(train_question1, train.question1, 'train_question1', train) # In[35]: train_question2 = [] process_questions(train_question2, train.question2, 'train_question2', train) # In[36]: test_question1 = [] process_questions(test_question1, test.question1, 'test_question1', test) # In[37]: test_question2 = [] process_questions(test_question2, test.question2, 'test_question2', test) # # Using Keras # In[38]: from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import datetime, time, json from keras.models import Sequential from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Bidirectional, Convolution1D, GRU, MaxPooling1D, Convolution2D from keras.regularizers import l2 from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping from keras import backend as K from sklearn.model_selection import train_test_split from keras.optimizers import SGD from collections import defaultdict # In[39]: # Count the number of different words in the reviews word_count = defaultdict(int) for question in train_question1: 
word_count[question] += 1 print("train_question1 is complete.") for question in train_question2: word_count[question] += 1 print("train_question2 is complete") for question in test_question1: word_count[question] += 1 print("test_question1 is complete.") for question in test_question2: word_count[question] += 1 print("test_question2 is complete") print("Total number of unique words:", len(word_count)) # In[40]: # Find the length of questions lengths = [] for question in train_question1: lengths.append(len(question.split())) for question in train_question2: lengths.append(len(question.split())) # Create a dataframe so that the values can be inspected lengths = pd.DataFrame(lengths, columns=['counts']) # In[41]: lengths.counts.describe() # In[42]: np.percentile(lengths.counts, 99.5) # In[43]: num_words = 200000 train_questions = train_question1 + train_question2 tokenizer = Tokenizer(nb_words = num_words) tokenizer.fit_on_texts(train_questions) print("Fitting is compelte.") train_question1_word_sequences = tokenizer.texts_to_sequences(train_question1) print("train_question1 is complete.") train_question2_word_sequences = tokenizer.texts_to_sequences(train_question2) print("train_question2 is complete") # In[44]: test_question1_word_sequences = tokenizer.texts_to_sequences(test_question1) print("test_question1 is complete.") test_question2_word_sequences = tokenizer.texts_to_sequences(test_question2) print("test_question2 is complete.") # In[45]: word_index = tokenizer.word_index print("Words in index: %d" % len(word_index)) # In[46]: # Pad the questions so that they all have the same length. max_question_len = 37 train_q1 = pad_sequences(train_question1_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("train_q1 is complete.") train_q2 = pad_sequences(train_question2_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("train_q2 is complete.") # In[47]: test_q1 = pad_sequences(test_question1_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("test_q1 is complete.") test_q2 = pad_sequences(test_question2_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("test_q2 is complete.") # In[48]: y_train = train.is_duplicate # In[49]: # Load GloVe to use pretrained vectors # From this link: https://nlp.stanford.edu/projects/glove/ embeddings_index = {} with open('glove.840B.300d.txt', encoding='utf-8') as f: for line in f: values = line.split(' ') word = values[0] embedding = np.asarray(values[1:], dtype='float32') embeddings_index[word] = embedding print('Word embeddings:', len(embeddings_index)) # In[50]: # Need to use 300 for embedding dimensions to match GloVe vectors. embedding_dim = 300 nb_words = len(word_index) word_embedding_matrix = np.zeros((nb_words + 1, embedding_dim)) for word, i in word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. 
word_embedding_matrix[i] = embedding_vector print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0)) # In[66]: units = 150 dropout = 0.25 nb_filter = 32 filter_length = 3 embedding_dim = 300 model1 = Sequential() model1.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model1.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model1.add(BatchNormalization()) model1.add(Activation('relu')) model1.add(Dropout(dropout)) model1.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model1.add(BatchNormalization()) model1.add(Activation('relu')) model1.add(Dropout(dropout)) model1.add(Flatten()) model2 = Sequential() model2.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model2.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model2.add(BatchNormalization()) model2.add(Activation('relu')) model2.add(Dropout(dropout)) model2.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model2.add(BatchNormalization()) model2.add(Activation('relu')) model2.add(Dropout(dropout)) model2.add(Flatten()) model3 = Sequential() model3.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model3.add(TimeDistributed(Dense(embedding_dim))) model3.add(BatchNormalization()) model3.add(Activation('relu')) model3.add(Dropout(dropout)) model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, ))) model4 = Sequential() model4.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model4.add(TimeDistributed(Dense(embedding_dim))) model4.add(BatchNormalization()) model4.add(Activation('relu')) model4.add(Dropout(dropout)) model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, ))) modela = Sequential() modela.add(Merge([model1, model2], mode='concat')) modela.add(Dense(units)) modela.add(BatchNormalization()) modela.add(Activation('relu')) modela.add(Dropout(dropout)) modela.add(Dense(units)) modela.add(BatchNormalization()) modela.add(Activation('relu')) modela.add(Dropout(dropout)) modelb = Sequential() modelb.add(Merge([model3, model4], mode='concat')) modelb.add(Dense(units)) modelb.add(BatchNormalization()) modelb.add(Activation('relu')) modelb.add(Dropout(dropout)) modelb.add(Dense(units)) modelb.add(BatchNormalization()) modelb.add(Activation('relu')) modelb.add(Dropout(dropout)) model = Sequential() model.add(Merge([modela, modelb], mode='concat')) model.add(Dense(units)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(dropout)) model.add(Dense(units)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(dropout)) model.add(Dense(1)) model.add(BatchNormalization()) model.add(Activation('sigmoid')) #sgd = SGD(lr=0.01, decay=5e-6, momentum=0.9, nesterov=True) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # In[67]: save_best_weights = 'question_pairs_weights.h5' t0 = time.time() callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True), EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')] history = model.fit([train_q1, train_q2], 
y_train, batch_size=200, nb_epoch=100, validation_split=0.1, verbose=True, shuffle=True, callbacks=callbacks) t1 = time.time() print("Minutes elapsed: %f" % ((t1 - t0) / 60.)) # In[68]: summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ], 'train_acc': history.history['acc'], 'valid_acc': history.history['val_acc'], 'train_loss': history.history['loss'], 'valid_loss': history.history['val_loss']}) # In[69]: summary_stats # In[70]: plt.plot(summary_stats.train_loss) plt.plot(summary_stats.valid_loss) plt.show() # In[71]: min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss'])) print('Minimum loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss)) min_loss = round(min_loss, 4) # In[72]: model.load_weights(save_best_weights) predictions = model.predict([test_q1, test_q2], verbose = True) # In[73]: #Create submission submission = pd.DataFrame(predictions, columns=['is_duplicate']) submission.insert(0, 'test_id', test.test_id) file_name = 'submission_{}.csv'.format(min_loss) submission.to_csv(file_name, index=False) # In[74]: submission.head(10)
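The embedding-matrix step above (cells 49-50) is worth isolating: every word in the tokenizer's `word_index` receives its pretrained GloVe vector if one exists and stays all-zero otherwise. A tiny NumPy-only sketch of that mapping, using a made-up three-word vocabulary and 4-dimensional vectors in place of the real 300-dimensional GloVe file:

import numpy as np

embedding_dim = 4  # 300 for real GloVe vectors
# Hypothetical stand-ins for tokenizer.word_index and the parsed GloVe file.
word_index = {'what': 1, 'is': 2, 'quora': 3}
embeddings_index = {'what': np.ones(embedding_dim), 'is': np.full(embedding_dim, 0.5)}

# Row i holds the vector for the word with index i; row 0 stays zero for padding.
word_embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
    vector = embeddings_index.get(word)
    if vector is not None:
        word_embedding_matrix[i] = vector

# 'quora' is not in the pretrained vocabulary, so its row stays all zeros.
print(np.sum(np.sum(word_embedding_matrix, axis=1) == 0))  # 2 zero rows: padding + 'quora'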
mit
nicholaschris/landsatpy
utils.py
1
2693
import operator import pandas as pd import numpy as np from numpy import ma from scipy.misc import imresize import scipy.ndimage as ndimage from skimage.morphology import disk, dilation def get_truth(input_one, input_two, comparison): # too much abstraction ops = {'>': operator.gt, '<': operator.lt, '>=': operator.ge, '<=': operator.le, '=': operator.eq} return ops[comparison](input_one, input_two) def convert_to_celsius(brightness_temp_input): return brightness_temp_input - 272.15 def calculate_percentile(input_masked_array, percentile): flat_fill_input = input_masked_array.filled(np.nan).flatten() df = pd.DataFrame(flat_fill_input) percentile = df.quantile(percentile/100.0) return percentile[0] def save_object(obj, filename): import pickle with open(filename, 'wb') as output: pickle.dump(obj, output) def downsample(input_array, factor=4): output_array = input_array[::2, ::2] / 4 + input_array[1::2, ::2] / 4 + input_array[::2, 1::2] / 4 + input_array[1::2, 1::2] / 4 return output_array def dilate_boolean_array(input_array, disk_size=3): selem = disk(disk_size) dilated = dilation(input_array, selem) return dilated def get_resized_array(img, size): lena = imresize(img, (size, size)) return lena def interp_and_resize(array, new_length): orig_y_length, orig_x_length = array.shape interp_factor_y = new_length / orig_y_length interp_factor_x = new_length / orig_x_length y = round(interp_factor_y * orig_y_length) x = round(interp_factor_x * orig_x_length) # http://docs.scipy.org/doc/numpy/reference/generated/numpy.mgrid.html new_indicies = np.mgrid[0:orig_y_length:y * 1j, 0:orig_x_length:x * 1j] # order=1 indicates bilinear interpolation. interp_array = ndimage.map_coordinates(array, new_indicies, order=1, output=array.dtype) interp_array = interp_array.reshape((y, x)) return interp_array def parse_mtl(in_file): awesome = True f = open(in_file, 'r') print(in_file) mtl_dict = {} with open(in_file, 'r') as f: while awesome: line = f.readline() if line.strip() == '' or line.strip() == 'END': return mtl_dict elif 'END_GROUP' in line: pass elif 'GROUP' in line: curr_group = line.split('=')[1].strip() mtl_dict[curr_group] = {} else: attr, value = line.split('=')[0].strip(), line.split('=')[1].strip() mtl_dict[curr_group][attr] = value
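Two of the helpers above are easy to sanity-check in isolation. The sketch below re-implements the 2x2 block-average `downsample` and the operator-dispatch `get_truth` on a tiny array (NumPy only; the original module additionally needs pandas, scipy and scikit-image for the other helpers):

import operator
import numpy as np

def downsample(a):
    # Average each non-overlapping 2x2 block, halving both dimensions.
    return a[::2, ::2] / 4 + a[1::2, ::2] / 4 + a[::2, 1::2] / 4 + a[1::2, 1::2] / 4

def get_truth(lhs, rhs, comparison):
    ops = {'>': operator.gt, '<': operator.lt,
           '>=': operator.ge, '<=': operator.le, '=': operator.eq}
    return ops[comparison](lhs, rhs)

a = np.arange(16, dtype=float).reshape(4, 4)
small = downsample(a)              # 2x2 array of block means
print(small)                       # [[ 2.5  4.5] [10.5 12.5]]
print(get_truth(small, 5.0, '>'))  # boolean mask of blocks whose mean exceeds 5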
mit
jseabold/statsmodels
examples/python/contrasts.py
5
9020
# coding: utf-8 # DO NOT EDIT # Autogenerated from the notebook contrasts.ipynb. # Edit the notebook and then sync the output with this file. # # flake8: noqa # DO NOT EDIT # # Contrasts Overview import numpy as np import statsmodels.api as sm # This document is based heavily on this excellent resource from UCLA # http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm # A categorical variable of K categories, or levels, usually enters a # regression as a sequence of K-1 dummy variables. This amounts to a linear # hypothesis on the level means. That is, each test statistic for these # variables amounts to testing whether the mean for that level is # statistically significantly different from the mean of the base category. # This dummy coding is called Treatment coding in R parlance, and we will # follow this convention. There are, however, different coding methods that # amount to different sets of linear hypotheses. # # In fact, the dummy coding is not technically a contrast coding. This is # because the dummy variables add to one and are not functionally # independent of the model's intercept. On the other hand, a set of # *contrasts* for a categorical variable with `k` levels is a set of `k-1` # functionally independent linear combinations of the factor level means # that are also independent of the sum of the dummy variables. The dummy # coding is not wrong *per se*. It captures all of the coefficients, but it # complicates matters when the model assumes independence of the # coefficients such as in ANOVA. Linear regression models do not assume # independence of the coefficients and thus dummy coding is often the only # coding that is taught in this context. # # To have a look at the contrast matrices in Patsy, we will use data from # UCLA ATS. First let's load the data. # #### Example Data import pandas as pd url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv' hsb2 = pd.read_table(url, delimiter=",") hsb2.head(10) # It will be instructive to look at the mean of the dependent variable, # write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African # American and 4 = Caucasian)). hsb2.groupby('race')['write'].mean() # #### Treatment (Dummy) Coding # Dummy coding is likely the most well known coding scheme. It compares # each level of the categorical variable to a base reference level. The base # reference level is the value of the intercept. It is the default contrast # in Patsy for unordered categorical factors. The Treatment contrast matrix # for race would be from patsy.contrasts import Treatment levels = [1, 2, 3, 4] contrast = Treatment(reference=0).code_without_intercept(levels) print(contrast.matrix) # Here we used `reference=0`, which implies that the first level, # Hispanic, is the reference category against which the other level effects # are measured. As mentioned above, the columns do not sum to zero and are # thus not independent of the intercept. To be explicit, let's look at how # this would encode the `race` variable. hsb2.race.head(10) print(contrast.matrix[hsb2.race - 1, :][:20]) sm.categorical(hsb2.race.values) # This is a bit of a trick, as the `race` category conveniently maps to # zero-based indices. If it does not, this conversion happens under the # hood, so this will not work in general but nonetheless is a useful exercise # to fix ideas. 
The below illustrates the output using the three contrasts # above from statsmodels.formula.api import ols mod = ols("write ~ C(race, Treatment)", data=hsb2) res = mod.fit() print(res.summary()) # We explicitly gave the contrast for race; however, since Treatment is # the default, we could have omitted this. # ### Simple Coding # Like Treatment Coding, Simple Coding compares each level to a fixed # reference level. However, with simple coding, the intercept is the grand # mean of all the levels of the factors. Patsy does not have the Simple # contrast included, but you can easily define your own contrasts. To do so, # write a class that contains a code_with_intercept and a # code_without_intercept method that returns a patsy.contrast.ContrastMatrix # instance from patsy.contrasts import ContrastMatrix def _name_levels(prefix, levels): return ["[%s%s]" % (prefix, level) for level in levels] class Simple(object): def _simple_contrast(self, levels): nlevels = len(levels) contr = -1. / nlevels * np.ones((nlevels, nlevels - 1)) contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.) / nlevels return contr def code_with_intercept(self, levels): contrast = np.column_stack((np.ones(len(levels)), self._simple_contrast(levels))) return ContrastMatrix(contrast, _name_levels("Simp.", levels)) def code_without_intercept(self, levels): contrast = self._simple_contrast(levels) return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1])) hsb2.groupby('race')['write'].mean().mean() contrast = Simple().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Simple)", data=hsb2) res = mod.fit() print(res.summary()) # ### Sum (Deviation) Coding # Sum coding compares the mean of the dependent variable for a given level # to the overall mean of the dependent variable over all the levels. That # is, it uses contrasts between each of the first k-1 levels and level k In # this example, level 1 is compared to all the others, level 2 to all the # others, and level 3 to all the others. from patsy.contrasts import Sum contrast = Sum().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Sum)", data=hsb2) res = mod.fit() print(res.summary()) # This corresponds to a parameterization that forces all the coefficients # to sum to zero. Notice that the intercept here is the grand mean where the # grand mean is the mean of means of the dependent variable by each level. hsb2.groupby('race')['write'].mean().mean() # ### Backward Difference Coding # In backward difference coding, the mean of the dependent variable for a # level is compared with the mean of the dependent variable for the prior # level. This type of coding may be useful for a nominal or an ordinal # variable. from patsy.contrasts import Diff contrast = Diff().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Diff)", data=hsb2) res = mod.fit() print(res.summary()) # For example, here the coefficient on level 1 is the mean of `write` at # level 2 compared with the mean at level 1. Ie., res.params["C(race, Diff)[D.1]"] hsb2.groupby('race').mean()["write"][2] - hsb2.groupby( 'race').mean()["write"][1] # ### Helmert Coding # Our version of Helmert coding is sometimes referred to as Reverse # Helmert Coding. The mean of the dependent variable for a level is compared # to the mean of the dependent variable over all previous levels. Hence, the # name 'reverse' being sometimes applied to differentiate from forward # Helmert coding. 
This comparison does not make much sense for a nominal # variable such as race, but we would use the Helmert contrast like so: from patsy.contrasts import Helmert contrast = Helmert().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Helmert)", data=hsb2) res = mod.fit() print(res.summary()) # To illustrate, the comparison on level 4 is the mean of the dependent # variable at the previous three levels taken from the mean at level 4 grouped = hsb2.groupby('race') grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean() # As you can see, these are only equal up to a constant. Other versions of # the Helmert contrast give the actual difference in means. Regardless, the # hypothesis tests are the same. k = 4 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) k = 3 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) # ### Orthogonal Polynomial Coding # The coefficients taken on by polynomial coding for `k=4` levels are the # linear, quadratic, and cubic trends in the categorical variable. The # categorical variable here is assumed to be represented by an underlying, # equally spaced numeric variable. Therefore, this type of encoding is used # only for ordered categorical variables with equal spacing. In general, the # polynomial contrast produces polynomials of order `k-1`. Since `race` is # not an ordered factor variable let's use `read` as an example. First we # need to create an ordered categorical from `read`. hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3)) hsb2.groupby('readcat').mean()['write'] from patsy.contrasts import Poly levels = hsb2.readcat.unique().tolist() contrast = Poly().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(readcat, Poly)", data=hsb2) res = mod.fit() print(res.summary()) # As you can see, readcat has a significant linear effect on the dependent # variable `write` but not a significant quadratic or cubic effect.
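Since the notebook keeps returning to the relationship between contrast coefficients and group means, here is a small self-contained check on synthetic data (no download of the UCLA file needed): with Treatment coding, the intercept recovers the reference-level mean and each dummy coefficient recovers a difference of means. The group labels and noise level are made up; parameter names in the printed output follow patsy's default labelling.

import numpy as np
import pandas as pd
from statsmodels.formula.api import ols

rng = np.random.RandomState(0)
df = pd.DataFrame({'g': np.repeat([1, 2, 3], 50)})
df['y'] = df['g'].map({1: 10.0, 2: 12.0, 3: 15.0}) + rng.normal(scale=0.1, size=len(df))

means = df.groupby('g')['y'].mean()
res = ols("y ~ C(g, Treatment)", data=df).fit()

# Intercept should match the g == 1 mean; the two Treatment dummies should
# match the differences of the g == 2 and g == 3 means from that reference.
print(res.params)
print(means[1], means[2] - means[1], means[3] - means[1])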
bsd-3-clause
ml31415/numpy-groupies
numpy_groupies/benchmarks/simple.py
1
4248
#!/usr/bin/python -B # -*- coding: utf-8 -*- from __future__ import print_function import timeit import numpy as np from numpy_groupies.utils import aliasing from numpy_groupies import aggregate_py, aggregate_np, aggregate_ufunc from numpy_groupies.aggregate_pandas import aggregate as aggregate_pd def aggregate_group_loop(*args, **kwargs): """wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.""" func = kwargs['func'] del kwargs['func'] return aggregate_np(*args, func=lambda x: func(x), **kwargs) print("TODO: use more extensive tests") print("") print("-----simple examples----------") test_a = np.array([12.0, 3.2, -15, 88, 12.9]) test_group_idx = np.array([1, 0, 1, 4, 1 ]) print("test_a: ", test_a) print("test_group_idx: ", test_group_idx) print("aggregate(test_group_idx, test_a):") print(aggregate_np(test_group_idx, test_a)) # group vals by idx and sum # array([3.2, 9.9, 0., 0., 88.]) print("aggregate(test_group_idx, test_a, sz=8, func='min', fill_value=np.nan):") print(aggregate_np(test_group_idx, test_a, size=8, func='min', fill_value=np.nan)) # array([3.2, -15., nan, 88., nan, nan, nan, nan]) print("aggregate(test_group_idx, test_a, sz=5, func=lambda x: ' + '.join(str(xx) for xx in x),fill_value='')") print(aggregate_np(test_group_idx, test_a, size=5, func=lambda x: ' + '.join(str(xx) for xx in x), fill_value='')) print("") print("---------testing--------------") print("compare against group-and-loop with numpy") testable_funcs = {aliasing[f]: f for f in (np.sum, np.prod, np.any, np.all, np.min, np.max, np.std, np.var, np.mean)} test_group_idx = np.random.randint(0, int(1e3), int(1e5)) test_a = np.random.rand(int(1e5)) * 100 - 50 test_a[test_a > 25] = 0 # for use with bool functions for name, f in testable_funcs.items(): numpy_loop_group = aggregate_group_loop(test_group_idx, test_a, func=f) for acc_func, acc_name in [(aggregate_np, 'np-optimised'), (aggregate_ufunc, 'np-ufunc-at'), (aggregate_py, 'purepy'), (aggregate_pd, 'pandas')]: try: test_out = acc_func(test_group_idx, test_a, func=name) test_out = np.asarray(test_out) if not np.allclose(test_out, numpy_loop_group.astype(test_out.dtype)): print(name, acc_name, "FAILED test, output: [" + acc_name + "; correct]...") print(np.vstack((test_out, numpy_loop_group))) else: print(name, acc_name, "PASSED test") except NotImplementedError: print(name, acc_name, "NOT IMPLEMENTED") print("") print("----------benchmarking-------------") print("Note that the actual observed speedup depends on a variety of properties of the input.") print("Here we are using 100,000 indices uniformly picked from [0, 1000).") print("Specifically, about 25% of the values are 0 (for use with bool operations),") print("the remainder are uniformly distribuited on [-50,25).") print("Times are scaled to 10 repetitions (actual number of reps used may not be 10).") print(''.join(['function'.rjust(8), 'pure-py'.rjust(14), 'np-grouploop'.rjust(14), 'np-ufuncat'.rjust(14), 'np-optimised'.rjust(14), 'pandas'.rjust(14), 'ratio'.rjust(15)])) for name, f in testable_funcs.items(): print(name.rjust(8), end='') times = [None] * 5 for ii, acc_func in enumerate([aggregate_py, aggregate_group_loop, aggregate_ufunc, aggregate_np, aggregate_pd]): try: func = f if acc_func is aggregate_group_loop else name reps = 3 if acc_func is aggregate_py else 20 times[ii] = timeit.Timer(lambda: acc_func(test_group_idx, test_a, func=func)).timeit(number=reps) / reps * 10 print(("%.1fms" % ((times[ii] * 1000))).rjust(13), end='') 
except NotImplementedError: print("no-impl".rjust(13), end='') denom = min(t for t in times if t is not None) ratios = [("-".center(4) if t is None else str(round(t / denom, 1))).center(5) for t in times] print(" ", (":".join(ratios)))
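For readers without numpy_groupies installed, the first "simple example" printed above can be reproduced with plain NumPy: `np.bincount` does the grouped sum, and a short Python loop stands in for the group-and-loop reference used in the benchmark. The arrays are the same test values as above.

import numpy as np

test_a = np.array([12.0, 3.2, -15, 88, 12.9])
test_group_idx = np.array([1, 0, 1, 4, 1])

# Grouped sum, same result as aggregate(test_group_idx, test_a):
print(np.bincount(test_group_idx, weights=test_a, minlength=5))
# [ 3.2  9.9  0.   0.  88. ]

# Group-and-loop fallback for an arbitrary function (here: min per group).
size = test_group_idx.max() + 1
out = np.full(size, np.nan)
for g in np.unique(test_group_idx):
    out[g] = test_a[test_group_idx == g].min()
print(out)  # [  3.2 -15.    nan   nan  88. ]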
bsd-2-clause
hstau/manifold-cryo
fit_1D_open_manifold_3D.py
1
5015
import numpy as np
import get_fit_1D_open_manifold_3D_param
import solve_d_R_d_tau_p_3D
import a  # shared-state module: holds x, a, b, x_fit, maxIter and the convergence tolerances
from scipy.io import loadmat
import matplotlib.pyplot as plt
#import matplotlib.pyplot as plt

'''
function [a,b,tau] = fit_1D_open_manifold_3D(psi)
%
% fit_1D_open_manifold_3D
%
% fit the eigenvectors for a 1D open manifold to the model
% x_ij = a_j cos(j*pi*tau_i) + b_j.
%
% j goes from 1 to 3 (this is only for 3D systems).
%
% i goes from 1 to nS where nS is the number of data points to be fitted.
%
% For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are
% obtained by putting dR/d(tau_i) to zero.
%
% For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are
% obtained by solving 3 sets of 2x2 linear equations.
%
% Fit parameters and initial set of {\tau} are specified in
%
%   get_fit_1D_open_manifold_3D_param.m
%
% copyright (c) Russell Fung 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global p nDim a b x x_fit
'''

'''
def plot_fitted_curve(hFig):
    global x x_fit
    h = plt.figure(hFig)
    hsp = plt.subplot(2,2,1)
    plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1);
    hold on
    plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1);
    hold off
    set(hsp,'lineWidth',2,'fontSize',15);
    hsp = subplot(2,2,2);
    plotRF(hsp,x(:,1),x(:,2),'','','','b.');
    addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.');
    hsp = subplot(2,2,3);
    plotRF(hsp,x(:,1),x(:,3),'','','','b.');
    addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.');
    hsp = subplot(2,2,4);
    plotRF(hsp,x(:,2),x(:,3),'','','','b.');
    addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.');
    drawnow
%end
'''

eps = 1e-4
#global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result


def op(psi):
    a.init()
    #global p, nDim, a, b, x, x_fit
    a.nDim = 3
    #tau = get_fit_1D_open_manifold_3D_param
    tau = get_fit_1D_open_manifold_3D_param.op(psi)
    aux = np.zeros((tau.shape[0], 5))  # added
    nS = a.x.shape[0]
    for iter in range(1, a.maxIter + 1):
        string = 'iteration ' + str(iter)
        print(string)
        '''
        #%%%%%%%%%%%%%%%%%%%%%
        #% solve for a and b %
        #%%%%%%%%%%%%%%%%%%%%%
        '''
        a_old = a.a
        b_old = a.b
        # design matrix columns cos(j*pi*tau_i) for j = 1, 2, 3
        j_pi_tau = np.dot(tau, np.pi * np.array([[1, 2, 3]]))
        cos_j_pi_tau = np.cos(j_pi_tau)
        A11 = np.sum(cos_j_pi_tau ** 2, axis=0)
        A12 = np.sum(cos_j_pi_tau, axis=0)
        A21 = A12
        A22 = nS
        x_cos_j_pi_tau = a.x * cos_j_pi_tau
        b1 = np.sum(x_cos_j_pi_tau, axis=0)
        b2 = np.sum(a.x, axis=0)
        coeff = np.zeros((2, 3))
        for qq in range(3):
            A = np.array([[A11[qq], A12[qq]], [A21[qq], A22]])
            b = np.array([b1[qq], b2[qq]])
            coeff[:, qq] = np.linalg.solve(A, b)
        a.a = coeff[0, :]
        a.b = coeff[1, :]
        '''
        %%%%%%%%%%%%%%%%%%%%%%%%%
        #% plot the fitted curve %
        %%%%%%%%%%%%%%%%%%%%%%%%%
        '''
        j_pi_tau = np.dot(np.linspace(0, 1, 1000).reshape(-1, 1),
                          np.array([[1, 2, 3]])) * np.pi
        cos_j_pi_tau = np.cos(j_pi_tau)
        tmp = a.a * cos_j_pi_tau
        a.x_fit = tmp + a.b
        #%plot_fitted_curve(iter)
        '''
        %%%%%%%%%%%%%%%%%
        #% solve for tau %
        %%%%%%%%%%%%%%%%%
        '''
        tau_old = tau
        for a.p in range(nS):
            tau[a.p], beta = solve_d_R_d_tau_p_3D.op()
            # added
            for kk in range(beta.shape[0]):
                aux[a.p, kk] = beta[kk]
        '''
        if iter == 0:
            data = loadmat('aux0.mat')  # (this is for < v7.3
        elif iter == 1:
            data = loadmat('aux1.mat')  # (this is for < v7.3
        else:
            data = loadmat('aux2.mat')  # (this is for < v7.3
        imaux = data['aux']
        plt.subplot(2, 2, 1)
        plt.imshow(aux, cmap=plt.get_cmap('gray'), aspect=0.1)
        plt.title('aux')
        plt.subplot(2, 2, 2)
        plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1)
        plt.title('imaux')
        plt.show()
        '''
        '''
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        #% calculate the changes in fitting parameters %
        #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        '''
        delta_a = np.fabs(a.a - a_old) / (np.fabs(a.a) + eps)
        delta_b = np.fabs(a.b - b_old) / (np.fabs(a.b) + eps)
        delta_tau = np.fabs(tau - tau_old)
        delta_a = max(delta_a) * 100
        delta_b = max(delta_b) * 100
        delta_tau = max(delta_tau)
        print(' changes in fitting parameters: \n')
        string = ' amplitudes: ' + str(delta_a) + '\n' + \
                 ' offsets: ' + str(delta_b) + ' \n' + \
                 ' values of tau: ' + str(delta_tau) + ' \n'
        print(string)
        if (delta_a < a.delta_a_max) and (delta_b < a.delta_b_max) and \
                (delta_tau < a.delta_tau_max):
            break
    return (a.a, a.b, tau)
gpl-2.0
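The file above alternates between solving three 2x2 linear systems for the amplitudes a_j and offsets b_j (tau fixed) and re-solving for tau (a, b fixed). Below is a minimal, self-contained NumPy sketch of that alternating fit, not the repository's code: the dR/d(tau_p) root-finding done by solve_d_R_d_tau_p_3D is replaced here by a simple grid search over [0, 1], and fit_open_manifold is an illustrative name.

import numpy as np

def fit_open_manifold(x, n_iter=50, grid=1000):
    # x has shape (nS, 3); model: x_ij ~ a_j * cos(j*pi*tau_i) + b_j, j = 1..3
    nS = x.shape[0]
    j = np.arange(1, 4)
    tau = np.linspace(0, 1, nS)                         # crude initial guess (assumption)
    tau_grid = np.linspace(0, 1, grid)
    basis_grid = np.cos(np.pi * tau_grid[:, None] * j)  # (grid, 3)
    a = np.zeros(3)
    b = np.zeros(3)
    for _ in range(n_iter):
        # solve for a_j, b_j with tau fixed: one 2x2 least-squares system per coordinate
        c = np.cos(np.pi * tau[:, None] * j)             # (nS, 3)
        for q in range(3):
            A = np.array([[np.sum(c[:, q] ** 2), np.sum(c[:, q])],
                          [np.sum(c[:, q]),      nS]])
            rhs = np.array([np.sum(x[:, q] * c[:, q]), np.sum(x[:, q])])
            a[q], b[q] = np.linalg.solve(A, rhs)
        # update tau with a, b fixed: pick the closest point on the fitted curve
        curve = a * basis_grid + b                       # (grid, 3)
        d2 = ((x[:, None, :] - curve[None, :, :]) ** 2).sum(axis=2)
        tau = tau_grid[np.argmin(d2, axis=1)]
    return a, b, tau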
nmayorov/scikit-learn
examples/plot_multilabel.py
236
4157
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================

This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:

    - pick the number of labels: n ~ Poisson(n_labels)
    - n times, choose a class c: c ~ Multinomial(theta)
    - pick the document length: k ~ Poisson(length)
    - k times, choose a word: w ~ Multinomial(theta_c)

In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both classes
are plotted surrounded by two colored circles.

The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.

Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA


def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    # get the separating hyperplane
    w = clf.coef_[0]
    a = -w[0] / w[1]
    xx = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    yy = a * xx - (clf.intercept_[0]) / w[1]
    plt.plot(xx, yy, linestyle, label=label)


def plot_subfigure(X, Y, subplot, title, transform):
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError

    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])

    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])

    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)

    plt.subplot(2, 2, subplot)
    plt.title(title)

    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')

    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())

    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")


plt.figure(figsize=(8, 6))

X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)

plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)

plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
bsd-3-clause
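For reference, a minimal non-plotting sketch of the pipeline the example's docstring describes (unsupervised 2-D projection with PCA, then one linear SVC per label via OneVsRestClassifier). It only uses sklearn calls that appear in the file above; printing the per-class hyperplanes stands in for the plotted decision boundaries.

import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA

# generate the same kind of 2-class multilabel data as the example
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True, random_state=1)

# unsupervised 2-D projection, then one linear SVC per class
X2 = PCA(n_components=2).fit_transform(X)
clf = OneVsRestClassifier(SVC(kernel="linear")).fit(X2, Y)

# each fitted estimator exposes the hyperplane used for its class boundary
for k, est in enumerate(clf.estimators_):
    print("class %d: w=%s, b=%s" % (k, est.coef_[0], est.intercept_))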
phobson/wqio
wqio/tests/test_datacollections.py
2
28761
from distutils.version import LooseVersion from textwrap import dedent from io import StringIO import numpy import scipy from scipy import stats import pandas from unittest import mock import pytest import pandas.testing as pdtest from wqio.tests import helpers from wqio.features import Location, Dataset from wqio.datacollections import DataCollection, _dist_compare OLD_SCIPY = LooseVersion(scipy.version.version) < LooseVersion("0.19") def check_stat(expected_csv, result, comp=False): index_col = [0] if comp: index_col += [1] file_obj = StringIO(dedent(expected_csv)) expected = pandas.read_csv(file_obj, header=[0, 1], index_col=index_col) if comp: expected = expected.stack(level=-1) pdtest.assert_frame_equal( expected.sort_index(axis="columns"), result.sort_index(axis="columns").round(6), atol=1e-5, ) def remove_g_and_h(group): return group.name[1] not in ["G", "H"] @pytest.fixture def dc(): df = helpers.make_dc_data_complex() dc = DataCollection( df, rescol="res", qualcol="qual", stationcol="loc", paramcol="param", ndval="<", othergroups=None, pairgroups=["state", "bmp"], useros=True, filterfxn=remove_g_and_h, bsiter=10000, ) return dc @pytest.fixture def dc_noNDs(): df = helpers.make_dc_data_complex() dc = DataCollection( df, rescol="res", qualcol="qual", stationcol="loc", paramcol="param", ndval="junk", othergroups=None, pairgroups=["state", "bmp"], useros=True, filterfxn=remove_g_and_h, bsiter=10000, ) return dc def test_basic_attr(dc): assert dc._raw_rescol == "res" assert isinstance(dc.data, pandas.DataFrame) assert dc.roscol == "ros_res" assert dc.rescol == "ros_res" assert dc.qualcol == "qual" assert dc.stationcol == "loc" assert dc.paramcol == "param" assert dc.ndval == ["<"] assert dc.bsiter == 10000 assert dc.groupcols == ["loc", "param"] assert dc.tidy_columns == ["loc", "param", "res", "__censorship"] assert hasattr(dc, "filterfxn") def test_data(dc): assert isinstance(dc.data, pandas.DataFrame) assert dc.data.shape == (519, 8) assert "G" in dc.data["param"].unique() assert "H" in dc.data["param"].unique() @pytest.mark.parametrize("useros", [True, False]) def test_tidy(dc, useros): assert isinstance(dc.tidy, pandas.DataFrame) assert dc.tidy.shape == (388, 5) assert "G" not in dc.tidy["param"].unique() assert "H" not in dc.tidy["param"].unique() collist = ["loc", "param", "res", "__censorship", "ros_res"] assert dc.tidy.columns.tolist() == collist def test_paired(dc): assert isinstance(dc.paired, pandas.DataFrame) assert dc.paired.shape == (164, 6) assert "G" not in dc.paired.index.get_level_values("param").unique() assert "H" not in dc.paired.index.get_level_values("param").unique() dc.paired.columns.tolist() == [ ("res", "Inflow"), ("res", "Outflow"), ("res", "Reference"), ("__censorship", "Inflow"), ("__censorship", "Outflow"), ("__censorship", "Reference"), ] def test_count(dc): known_csv = """\ station,Inflow,Outflow,Reference result,Count,Count,Count param,,, A,21,22,20 B,24,22,19 C,24,24,25 D,24,25,21 E,19,16,20 F,21,24,17 """ check_stat(known_csv, dc.count) def test_n_unique(dc): known_csv = """\ loc,Inflow,Outflow,Reference result,bmp,bmp,bmp param,,, A,7,7,7 B,7,7,7 C,7,7,7 D,7,7,7 E,7,7,7 F,7,7,7 G,7,7,7 H,7,7,7 """ check_stat(known_csv, dc.n_unique("bmp")) @helpers.seed def test_median(dc): known_csv = """\ station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference result,lower,median,upper,lower,median,upper,lower,median,upper param,,,,,,,,, A,0.334506,1.197251,2.013994,0.860493,2.231058,2.626023,1.073386,1.639472,1.717293 
B,1.366948,2.773989,3.297147,0.23201,1.546499,2.579206,0.204164,1.565076,2.196367 C,0.17351,0.525957,0.68024,0.247769,0.396984,0.540742,0.136462,0.412693,0.559458 D,0.374122,1.201892,2.098846,0.516989,1.362759,1.827087,0.314655,0.882695,1.24545 E,0.276095,1.070858,1.152887,0.287914,0.516746,1.456859,0.366824,0.80716,2.040739 F,0.05667,0.832488,1.310575,0.425237,1.510942,2.193997,0.162327,0.745993,1.992513 """ check_stat(known_csv, dc.median) @helpers.seed def test_mean(dc): known_csv = """\ station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference result,lower,mean,upper,lower,mean,upper,lower,mean,upper param,,,,,,,,, A,1.231607,2.646682,4.204054,1.930601,5.249281,9.081952,1.540167,3.777974,6.389439 B,2.99031,7.647175,12.810844,1.545539,6.863835,12.705913,1.010374,4.504255,9.592572 C,0.37496,0.513248,0.65948,0.411501,1.004637,1.706317,0.35779,0.541962,0.734751 D,1.29141,3.021235,4.987855,1.285899,2.318808,3.451824,1.008364,1.945828,2.924812 E,0.818641,1.914696,3.049554,0.584826,1.098241,1.640807,1.113589,2.283292,3.581946 F,0.8379,9.825404,25.289933,1.497825,3.450184,5.61929,0.939917,2.491708,4.094258 """ check_stat(known_csv, dc.mean) @helpers.seed def test_std_dev(dc): known_csv = """\ station,Inflow,Outflow,Reference result,std. dev.,std. dev.,std. dev. param,,, A,3.58649,8.719371,5.527633 B,12.360099,13.60243,10.759285 C,0.353755,1.691208,0.493325 D,4.811938,2.849393,2.248178 E,2.55038,1.096698,2.789238 F,34.447565,5.361033,3.398367 """ check_stat(known_csv, dc.std_dev) @helpers.seed def test_percentile_25(dc): known_csv = """\ station,Inflow,Outflow,Reference result,pctl 25,pctl 25,pctl 25 param,,, A,0.522601,0.906029,1.094721 B,1.472541,0.251126,0.314226 C,0.164015,0.267521,0.136462 D,0.35688,0.516989,0.383895 E,0.364748,0.311508,0.394658 F,0.120068,0.406132,0.224429 """ check_stat(known_csv, dc.percentile(25)) @helpers.seed def test_percentile_75(dc): known_csv = """\ station,Inflow,Outflow,Reference result,pctl 75,pctl 75,pctl 75 param,,, A,2.563541,3.838021,2.650648 B,4.728871,2.849948,2.261847 C,0.776388,0.853535,0.792612 D,3.04268,2.79341,3.611793 E,1.532775,1.59183,3.201534 F,1.792985,2.80979,2.742249 """ check_stat(known_csv, dc.percentile(75)) @helpers.seed def test_logmean(dc): known_csv = """\ station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference result,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper param,,,,,,,,, A,0.140559,-0.55112,0.644202,0.733004,0.047053,1.22099,0.545205,-0.057683,1.029948 B,1.026473,0.368659,1.541241,0.105106,-0.939789,0.860244,0.068638,-0.932357,0.661203 C,-0.963004,-1.304115,-0.638446,-0.83221,-1.464092,-0.414379,-1.088377,-1.556795,-0.720706 D,0.062317,-0.663241,0.58349,0.185757,-0.325074,0.598432,-0.063507,-0.670456,0.434214 E,-0.103655,-0.751075,0.385909,-0.456202,-1.08692,0.029967,-0.068135,-0.787007,0.51226 F,-0.442721,-1.874677,0.344704,0.211658,-0.504166,0.734283,-0.253352,-1.175917,0.467231 """ check_stat(known_csv, dc.logmean) @helpers.seed def test_logstd_dev(dc): known_csv = """\ station,Inflow,Outflow,Reference result,Log-std. dev.,Log-std. dev.,Log-std. dev. 
param,,, A,1.374026,1.343662,1.225352 B,1.430381,2.07646,1.662001 C,0.818504,1.263631,1.057177 D,1.530871,1.187246,1.277927 E,1.264403,1.121038,1.474431 F,2.324063,1.516331,1.701596 """ check_stat(known_csv, dc.logstd_dev) @helpers.seed def test_geomean(dc): known_csv = """\ station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference Geo-mean,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper param,,,,,,,,, A,1.150917,0.576304,1.904467,2.081323,1.048178,3.390543,1.724962,0.943949,2.800919 B,2.791205,1.445795,4.670381,1.110829,0.39071,2.363737,1.071049,0.393625,1.937121 C,0.381744,0.271413,0.528113,0.435087,0.231288,0.66075,0.336763,0.210811,0.486409 D,1.064299,0.515179,1.792283,1.204129,0.722474,1.819264,0.938467,0.511475,1.543749 E,0.901536,0.471859,1.470951,0.633686,0.337254,1.03042,0.934134,0.455205,1.66906 F,0.642286,0.153405,1.411572,1.235726,0.604009,2.083988,0.776195,0.308536,1.595571 """ check_stat(known_csv, dc.geomean) @helpers.seed def test_geostd_dev(dc): known_csv = """\ station,Inflow,Outflow,Reference Geo-std. dev.,Log-std. dev.,Log-std. dev.,Log-std. dev. param,,, A,3.951225,3.833055,3.405365 B,4.180294,7.976181,5.269843 C,2.267105,3.538244,2.878234 D,4.622199,3.278041,3.589191 E,3.540977,3.068036,4.368548 F,10.217099,4.55548,5.48269 """ check_stat(known_csv, dc.geostd_dev) @helpers.seed def test_shapiro(dc): known_csv = """\ station,Inflow,Inflow,Outflow,Outflow,Reference,Reference result,pvalue,statistic,pvalue,statistic,pvalue,statistic param,,,,,, A,1.8e-05,0.685783,1e-06,0.576069,4e-06,0.61735 B,1e-06,0.594411,0.0,0.530962,0.0,0.41471 C,0.028774,0.905906,0.0,0.546626,0.00279,0.860373 D,1e-06,0.622915,1.5e-05,0.722374,0.000202,0.76518 E,1.7e-05,0.654137,0.004896,0.818813,0.000165,0.74917 F,0.0,0.292916,2e-06,0.634671,0.000167,0.713968 """ check_stat(known_csv, dc.shapiro) @helpers.seed def test_shapiro_log(dc): known_csv = """\ station,Inflow,Inflow,Outflow,Outflow,Reference,Reference result,statistic,pvalue,statistic,pvalue,statistic,pvalue param,,,,,, A,0.983521938,0.96662426,0.979861856,0.913820148,0.939460814,0.234214202 B,0.957531095,0.390856266,0.97048676,0.722278714,0.967978418,0.735424638 C,0.906479359,0.029602444,0.974698305,0.78197974,0.967106879,0.572929323 D,0.989704251,0.995502174,0.990663111,0.997093379,0.964812279,0.617747009 E,0.955088913,0.479993254,0.95211035,0.523841977,0.963425279,0.61430341 F,0.97542423,0.847370088,0.982230783,0.933124721,0.966197193,0.749036908 """ check_stat(known_csv, dc.shapiro_log) @helpers.seed def test_lilliefors(dc): known_csv = """\ station,Inflow,Inflow,Outflow,Outflow,Reference,Reference result,lilliefors,pvalue,lilliefors,pvalue,lilliefors,pvalue param,,,,,, A,0.308131,1.4e-05,0.340594,0.0,0.364453,0.0 B,0.36764,0.0,0.420343,0.0,0.417165,0.0 C,0.166799,0.082737,0.324733,0.0,0.161753,0.090455 D,0.273012,6.7e-05,0.240311,0.000665,0.296919,3.7e-05 E,0.341398,3e-06,0.239314,0.014862,0.233773,0.005474 F,0.419545,0.0,0.331315,0.0,0.284249,0.000741 """ check_stat(known_csv, dc.lilliefors) @helpers.seed def test_lilliefors_log(dc): known_csv = """\ station,Inflow,Inflow,Outflow,Outflow,Reference,Reference result,log-lilliefors,pvalue,log-lilliefors,pvalue,log-lilliefors,pvalue param,,,,,, A,0.08548109,0.95458004,0.15443943,0.19715747,0.20141389,0.03268737 B,0.16162839,0.10505016,0.12447902,0.49697902,0.15934334,0.22969362 C,0.16957278,0.07248915,0.12388174,0.44379732,0.11746642,0.48915671 D,0.06885549,0.99,0.06067356,0.99,0.13401954,0.41967483 
E,0.13506577,0.47186822,0.14552341,0.47797919,0.09164876,0.92860794 F,0.14420794,0.30694533,0.08463267,0.92741885,0.08586933,0.9800294 """ check_stat(known_csv, dc.lilliefors_log) @helpers.seed def test_anderson_darling(dc): with helpers.raises(NotImplementedError): _ = dc.anderson_darling @helpers.seed def test_anderson_darling_log(dc): with helpers.raises(NotImplementedError): _ = dc.anderson_darling_log @helpers.seed def test_mann_whitney(dc): known_csv = """\ ,,mann_whitney,mann_whitney,mann_whitney,pvalue,pvalue,pvalue loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,180.0,179.0,,0.2198330905,0.4263216587 A,Outflow,282.0,,248.0,0.2198330905,,0.488580368 A,Reference,241.0,192.0,,0.4263216587,0.488580368, B,Inflow,,345.0,317.0,,0.0766949991,0.0304383994 B,Outflow,183.0,,216.0,0.0766949991,,0.8650586835 B,Reference,139.0,202.0,,0.0304383994,0.8650586835, C,Inflow,,282.0,323.0,,0.9097070273,0.6527104406 C,Outflow,294.0,,323.0,0.9097070273,,0.6527104406 C,Reference,277.0,277.0,,0.6527104406,0.6527104406, D,Inflow,,285.0,263.0,,0.7718162376,0.8111960975 D,Outflow,315.0,,293.0,0.7718162376,,0.5082395211 D,Reference,241.0,232.0,,0.8111960975,0.5082395211, E,Inflow,,164.0,188.0,,0.7033493939,0.9663820218 E,Outflow,140.0,,132.0,0.7033493939,,0.3813114322 E,Reference,192.0,188.0,,0.9663820218,0.3813114322, F,Inflow,,201.0,172.0,,0.2505911218,0.8601783903 F,Outflow,303.0,,236.0,0.2505911218,,0.4045186043 F,Reference,185.0,172.0,,0.8601783903,0.4045186043 """ check_stat(known_csv, dc.mann_whitney, comp=True) @helpers.seed def test_t_test(dc): known_csv = """\ ,,pvalue,pvalue,pvalue,t_test,t_test,t_test loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,0.2178424157,0.4563196599,,-1.2604458127,-0.7539785777 A,Outflow,0.2178424157,,0.5240147979,1.2604458127,,0.643450194 A,Reference,0.4563196599,0.5240147979,,0.7539785777,-0.643450194, B,Inflow,,0.8430007638,0.3898358794,,0.1992705833,0.869235357 B,Outflow,0.8430007638,,0.5491097882,-0.1992705833,,0.6043850808 B,Reference,0.3898358794,0.5491097882,,-0.869235357,-0.6043850808, C,Inflow,,0.1847386316,0.8191392537,,-1.3639360123,-0.2300373632 C,Outflow,0.1847386316,,0.2179907667,1.3639360123,,1.2615982727 C,Reference,0.8191392537,0.2179907667,,0.2300373632,-1.2615982727, D,Inflow,,0.5484265023,0.344783812,,0.6056706932,0.9582600001 D,Outflow,0.5484265023,,0.6299742693,-0.6056706932,,0.4851636024 D,Reference,0.344783812,0.6299742693,,-0.9582600001,-0.4851636024, E,Inflow,,0.2304569921,0.6770414622,,1.2287029977,-0.4198288251 E,Outflow,0.2304569921,,0.1023435465,-1.2287029977,,-1.6935358498 E,Reference,0.6770414622,0.1023435465,,0.4198288251,1.6935358498, F,Inflow,,0.422008391,0.3549979666,,0.8190789273,0.9463539528 F,Outflow,0.422008391,,0.4988994144,-0.8190789273,,0.6826435968 F,Reference,0.3549979666,0.4988994144,,-0.9463539528,-0.6826435968 """ check_stat(known_csv, dc.t_test, comp=True) @helpers.seed def test_levene(dc): known_csv = """\ ,,levene,levene,levene,pvalue,pvalue,pvalue loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,1.176282059,0.293152155,,0.284450688,0.591287419 A,Outflow,1.176282059,,0.397705309,0.284450688,,0.531863542 A,Reference,0.293152155,0.397705309,,0.591287419,0.531863542, B,Inflow,,0.003559637,0.402002411,,0.952694449,0.529578712 B,Outflow,0.003559637,,0.408938588,0.952694449,,0.526247443 B,Reference,0.402002411,0.408938588,,0.529578712,0.526247443, C,Inflow,,1.965613561,0.679535532,,0.167626459,0.413910674 
C,Outflow,1.965613561,,1.462364363,0.167626459,,0.232602352 C,Reference,0.679535532,1.462364363,,0.413910674,0.232602352, D,Inflow,,0.643364813,0.983777911,,0.426532092,0.32681669 D,Outflow,0.643364813,,0.116830634,0.426532092,,0.734124856 D,Reference,0.983777911,0.116830634,,0.32681669,0.734124856, E,Inflow,,0.961616536,0.410491665,,0.333914902,0.525668596 E,Outflow,0.961616536,,2.726351564,0.333914902,,0.107912818 E,Reference,0.410491665,2.726351564,,0.525668596,0.107912818, F,Inflow,,0.841984453,0.734809611,,0.363948105,0.396999375 F,Outflow,0.841984453,,0.25881357,0.363948105,,0.613802541 F,Reference,0.734809611,0.25881357,,0.396999375,0.613802541, """ check_stat(known_csv, dc.levene, comp=True) @helpers.seed def test_wilcoxon(dc): known_csv = """\ ,,wilcoxon,wilcoxon,wilcoxon,pvalue,pvalue,pvalue loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,32.0,59.0,,0.03479,0.430679 A,Outflow,32.0,,46.0,0.03479,,0.274445 A,Reference,59.0,46.0,,0.430679,0.274445, B,Inflow,,38.0,22.0,,0.600179,0.182338 B,Outflow,38.0,,31.0,0.600179,,0.858863 B,Reference,22.0,31.0,,0.182338,0.858863, C,Inflow,,75.0,120.0,,0.167807,0.601046 C,Outflow,75.0,,113.0,0.167807,,0.463381 C,Reference,120.0,113.0,,0.601046,0.463381, D,Inflow,,44.0,31.0,,0.593618,0.530285 D,Outflow,44.0,,45.0,0.593618,,0.972125 D,Reference,31.0,45.0,,0.530285,0.972125, E,Inflow,,21.0,19.0,,0.910156,0.386271 E,Outflow,21.0,,16.0,0.910156,,0.077148 E,Reference,19.0,16.0,,0.386271,0.077148, F,Inflow,,62.0,22.0,,0.492459,0.952765 F,Outflow,62.0,,28.0,0.492459,,0.656642 F,Reference,22.0,28.0,,0.952765,0.656642, """ with pytest.warns(UserWarning): check_stat(known_csv, dc.wilcoxon, comp=True) @helpers.seed def test_ranksums(dc): known_csv = """\ ,,pvalue,pvalue,pvalue,rank_sums,rank_sums,rank_sums loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,0.2153009,0.4187782,,-1.2391203,-0.8085428 A,Outflow,0.2153009,,0.4807102,1.2391203,,0.7051607 A,Reference,0.4187782,0.4807102,,0.8085428,-0.7051607, B,Inflow,,0.0748817,0.029513,,1.781188,2.1765661 B,Outflow,0.0748817,,0.8547898,-1.781188,,0.1830104 B,Reference,0.029513,0.8547898,,-2.1765661,-0.1830104, C,Inflow,,0.9015386,0.6455162,,-0.1237179,0.46 C,Outflow,0.9015386,,0.6455162,0.1237179,,0.46 C,Reference,0.6455162,0.6455162,,-0.46,-0.46, D,Inflow,,0.7641772,0.8023873,,-0.3,0.2502587 D,Outflow,0.7641772,,0.5011969,0.3,,0.6726078 D,Reference,0.8023873,0.5011969,,-0.2502587,-0.6726078, E,Inflow,,0.6911022,0.9551863,,0.3973597,-0.0561951 E,Outflow,0.6911022,,0.3727144,-0.3973597,,-0.8914004 E,Reference,0.9551863,0.3727144,,0.0561951,0.8914004, F,Inflow,,0.2459307,0.8486619,,-1.1602902,-0.190826 F,Outflow,0.2459307,,0.3971011,1.1602902,,0.8468098 F,Reference,0.8486619,0.3971011,,0.190826,-0.8468098, """ check_stat(known_csv, dc.ranksums, comp=True) @helpers.seed @pytest.mark.xfail(OLD_SCIPY, reason="Scipy < 0.19") def test_kendall(dc): known_csv = """\ ,,kendalltau,kendalltau,kendalltau,pvalue,pvalue,pvalue loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,-0.051661,-0.00738,,0.772893,0.967114 A,Outflow,-0.051661,,-0.083333,0.772893,,0.690095 A,Reference,-0.00738,-0.083333,,0.967114,0.690095, B,Inflow,,0.441351,0.298246,,0.015267,0.119265 B,Outflow,0.441351,,0.559855,0.015267,,0.004202 B,Reference,0.298246,0.559855,,0.119265,0.004202, C,Inflow,,0.280223,0.084006,,0.078682,0.578003 C,Outflow,0.280223,,-0.1417,0.078682,,0.352394 C,Reference,0.084006,-0.1417,,0.578003,0.352394, 
D,Inflow,,0.403469,0.095299,,0.020143,0.634826 D,Outflow,0.403469,,0.318337,0.020143,,0.094723 D,Reference,0.095299,0.318337,,0.634826,0.094723, E,Inflow,,0.114286,0.640703,,0.673337,0.004476 E,Outflow,0.114286,,0.167944,0.673337,,0.449603 E,Reference,0.640703,0.167944,,0.004476,0.449603, F,Inflow,,0.0,0.07231,,1.0,0.763851 F,Outflow,0.0,,0.388889,1.0,,0.063 F,Reference,0.07231,0.388889,,0.763851,0.063, """ check_stat(known_csv, dc.kendall, comp=True) @helpers.seed def test_spearman(dc): known_csv = """\ ,,pvalue,pvalue,pvalue,spearmanrho,spearmanrho,spearmanrho loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference param,loc_1,,,,,, A,Inflow,,0.7574884491,0.9627447553,,-0.0809319588,0.012262418 A,Outflow,0.7574884491,,0.7617330788,-0.0809319588,,-0.0823529412 A,Reference,0.9627447553,0.7617330788,,0.012262418,-0.0823529412, B,Inflow,,0.0110829791,0.0775159774,,0.5831305575,0.4537313433 B,Outflow,0.0110829791,,0.0024069317,0.5831305575,,0.6850916941 B,Reference,0.0775159774,0.0024069317,,0.4537313433,0.6850916941, C,Inflow,,0.1330504059,0.6063501968,,0.3387640122,0.1134228342 C,Outflow,0.1330504059,,0.3431640379,0.3387640122,,-0.2070506455 C,Reference,0.6063501968,0.3431640379,,0.1134228342,-0.2070506455, D,Inflow,,0.0195715066,0.4751861062,,0.4935814032,0.1858231711 D,Outflow,0.0195715066,,0.1263974782,0.4935814032,,0.363209462 D,Reference,0.4751861062,0.1263974782,,0.1858231711,0.363209462, E,Inflow,,0.9828818202,0.0013596162,,0.0084033613,0.8112988341 E,Outflow,0.9828818202,,0.3413722947,0.0084033613,,0.3012263814 E,Reference,0.0013596162,0.3413722947,,0.8112988341,0.3012263814, F,Inflow,,0.9645303744,0.6759971848,,-0.0106277141,0.1348767061 F,Outflow,0.9645303744,,0.0560590794,-0.0106277141,,0.5028571429 F,Reference,0.6759971848,0.0560590794,,0.1348767061,0.5028571429 """ check_stat(known_csv, dc.spearman, comp=True) @helpers.seed def test_theilslopes(dc): with helpers.raises(NotImplementedError): _ = dc.theilslopes def test_inventory(dc): known_csv = StringIO( dedent( """\ loc,param,Count,Non-Detect Inflow,A,21,3 Inflow,B,24,6 Inflow,C,24,0 Inflow,D,24,11 Inflow,E,19,4 Inflow,F,21,8 Outflow,A,22,1 Outflow,B,22,9 Outflow,C,24,4 Outflow,D,25,12 Outflow,E,16,2 Outflow,F,24,8 Reference,A,20,2 Reference,B,19,6 Reference,C,25,4 Reference,D,21,12 Reference,E,20,3 Reference,F,17,7 """ ) ) expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int) pdtest.assert_frame_equal(expected, dc.inventory.astype(int), check_names=False) def test_inventory_noNDs(dc_noNDs): known_csv = StringIO( dedent( """\ loc,param,Count,Non-Detect Inflow,A,21,0 Inflow,B,24,0 Inflow,C,24,0 Inflow,D,24,0 Inflow,E,19,0 Inflow,F,21,0 Outflow,A,22,0 Outflow,B,22,0 Outflow,C,24,0 Outflow,D,25,0 Outflow,E,16,0 Outflow,F,24,0 Reference,A,20,0 Reference,B,19,0 Reference,C,25,0 Reference,D,21,0 Reference,E,20,0 Reference,F,17,0 """ ) ) expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int) pdtest.assert_frame_equal( expected, dc_noNDs.inventory.astype(int), check_names=False, ) @helpers.seed def test_stat_summary(dc): known_csv = StringIO( dedent( """\ ros_res,loc,A,B,C,D,E,F Count,Inflow,21,24,24,24,19,21 Count,Outflow,22,22,24,25,16,24 Count,Reference,20,19,25,21,20,17 Non-Detect,Inflow,3.0,6.0,0.0,11.0,4.0,8.0 Non-Detect,Outflow,1.0,9.0,4.0,12.0,2.0,8.0 Non-Detect,Reference,2.0,6.0,4.0,12.0,3.0,7.0 mean,Inflow,2.64668,7.64717,0.51325,3.02124,1.9147,9.8254 mean,Outflow,5.24928,6.86384,1.00464,2.31881,1.09824,3.45018 mean,Reference,3.77797,4.50425,0.54196,1.94583,2.28329,2.49171 
std,Inflow,3.67506,12.62594,0.36136,4.91543,2.62027,35.29825 std,Outflow,8.92456,13.92253,1.72758,2.90815,1.13267,5.47634 std,Reference,5.67123,11.05411,0.5035,2.3037,2.8617,3.50296 min,Inflow,0.0756,0.17404,0.10213,0.05365,0.08312,0.00803 min,Outflow,0.11177,0.02106,0.03578,0.11678,0.07425,0.06377 min,Reference,0.15575,0.04909,0.04046,0.08437,0.05237,0.03445 10%,Inflow,0.1772,0.45233,0.13467,0.15495,0.1763,0.03548 10%,Outflow,0.44852,0.08297,0.08222,0.26949,0.19903,0.18008 10%,Reference,0.38448,0.13467,0.08241,0.19355,0.12777,0.09457 25%,Inflow,0.5226,1.47254,0.16401,0.35688,0.36475,0.12007 25%,Outflow,0.90603,0.25113,0.26752,0.51699,0.31151,0.40613 25%,Reference,1.09472,0.31423,0.13646,0.3839,0.39466,0.22443 50%,Inflow,1.19725,2.77399,0.52596,1.20189,1.07086,0.83249 50%,Outflow,2.23106,1.5465,0.39698,1.36276,0.51675,1.51094 50%,Reference,1.63947,1.56508,0.41269,0.8827,0.80716,0.74599 75%,Inflow,2.56354,4.72887,0.77639,3.04268,1.53278,1.79299 75%,Outflow,3.83802,2.84995,0.85354,2.79341,1.59183,2.80979 75%,Reference,2.65065,2.26185,0.79261,3.61179,3.20153,2.74225 90%,Inflow,6.02835,24.40655,0.99293,8.00691,6.28345,8.51706 90%,Outflow,12.43052,23.90022,2.43829,5.66731,2.30348,10.32829 90%,Reference,12.58278,6.67125,1.2205,4.78255,7.72012,8.57303 max,Inflow,13.87664,45.97893,1.26657,21.75505,8.88365,163.01001 max,Outflow,36.58941,47.49381,8.04948,12.39894,4.19118,23.29367 max,Reference,21.22363,48.23615,1.94442,7.67751,8.75609,10.5095 """ ) ) expected = pandas.read_csv(known_csv, index_col=[0, 1]).T pdtest.assert_frame_equal( expected.round(5), dc.stat_summary().round(5), check_names=False, check_dtype=False, rtol=1e-4, ) def test_locations(dc): for loc in dc.locations: assert isinstance(loc, Location) assert len(dc.locations) == 18 assert dc.locations[0].definition == {"loc": "Inflow", "param": "A"} assert dc.locations[1].definition == {"loc": "Inflow", "param": "B"} def test_datasets(dc): _ds = [] for d in dc.datasets("Inflow", "Outflow"): assert isinstance(d, Dataset) _ds.append(d) assert len(_ds) == 6 assert _ds[0].definition == {"param": "A"} assert _ds[1].definition == {"param": "B"} # this sufficiently tests dc._filter_collection def test_selectLocations(dc): locs = dc.selectLocations(param="A", loc=["Inflow", "Outflow"]) assert len(locs) == 2 for n, (loc, loctype) in enumerate(zip(locs, ["Inflow", "Outflow"])): assert isinstance(loc, Location) assert loc.definition["param"] == "A" assert loc.definition["loc"] == loctype def test_selectLocations_squeeze_False(dc): locs = dc.selectLocations(param="A", loc=["Inflow"], squeeze=False) assert len(locs) == 1 for n, loc in enumerate(locs): assert isinstance(loc, Location) assert loc.definition["param"] == "A" assert loc.definition["loc"] == "Inflow" def test_selectLocations_squeeze_True(dc): loc = dc.selectLocations(param="A", loc=["Inflow"], squeeze=True) assert isinstance(loc, Location) assert loc.definition["param"] == "A" assert loc.definition["loc"] == "Inflow" def test_selectLocations_squeeze_True_None(dc): loc = dc.selectLocations(param="A", loc=["Junk"], squeeze=True) assert loc is None # since the test_selectLocations* tests stress _filter_collection # enough, we'll mock it out for datasets: def test_selectDatasets(dc): with mock.patch.object(dc, "_filter_collection") as _fc: with mock.patch.object(dc, "datasets", return_value=["A", "B"]) as _ds: dc.selectDatasets("Inflow", "Reference", foo="A", bar="C") _ds.assert_called_once_with("Inflow", "Reference") _fc.assert_called_once_with(["A", "B"], foo="A", bar="C", squeeze=False) 
@pytest.mark.parametrize("func", [stats.mannwhitneyu, stats.wilcoxon]) @pytest.mark.parametrize( ("x", "all_same"), [([5, 5, 5, 5, 5], True), ([5, 6, 7, 7, 8], False)] ) def test_dist_compare_wrapper(x, all_same, func): y = [5, 5, 5, 5, 5] with mock.patch.object(stats, func.__name__) as _test: result = _dist_compare(x, y, _test) if all_same: assert numpy.isnan(result.stat) assert numpy.isnan(result.pvalue) assert _test.call_count == 0 else: # assert result == (0, 0) _test.assert_called_once_with(x, y, alternative="two-sided")
bsd-3-clause
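The test module above repeatedly parses an expected CSV with a two-row column header (station / statistic) plus an index-name row, and compares it against a computed DataFrame. A stand-alone sketch of that check pattern follows; the sample numbers are taken from test_count above, and the result frame here is just a placeholder copy standing in for something like dc.count.

from io import StringIO
from textwrap import dedent

import pandas
import pandas.testing as pdtest

known_csv = """\
station,Inflow,Outflow
result,Count,Count
param,,
A,21,22
B,24,22
"""
# two header rows build a (station, statistic) column MultiIndex
expected = pandas.read_csv(StringIO(dedent(known_csv)), header=[0, 1], index_col=[0])

# placeholder for a computed statistic such as dc.count in the real tests
result = expected.copy()

pdtest.assert_frame_equal(expected.sort_index(axis="columns"),
                          result.sort_index(axis="columns").round(6))
print("frames match")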
imanolarrieta/RL
rlpy/Domains/HelicopterHover.py
4
16981
"""Helicopter hovering task.""" from .Domain import Domain import numpy as np import rlpy.Tools.transformations as trans from rlpy.Tools.GeneralTools import cartesian import matplotlib.pyplot as plt from matplotlib.patches import FancyArrowPatch, Circle, Ellipse from mpl_toolkits.mplot3d import proj3d __copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" __credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann", "William Dabney", "Jonathan P. How"] __license__ = "BSD 3-Clause" __author__ = "Christoph Dann <cdann@cdann.de>" class Arrow3D(FancyArrowPatch): """ Helper class for plotting arrows in 3d """ def __init__(self, xs, ys, zs, *args, **kwargs): FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs) self._verts3d = xs, ys, zs def draw(self, renderer): xs3d, ys3d, zs3d = self._verts3d xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M) self.set_positions((xs[0], ys[0]), (xs[1], ys[1])) FancyArrowPatch.draw(self, renderer) class HelicopterHoverExtended(Domain): """ Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 20-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-12: orientation of heli in world as quaterion * 13-18: current noise due to gusts (usually not observable!) * 19: t number of timesteps in current episode **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ MAX_POS = 20. #: [m] maximum deviation in position in each dimension MAX_VEL = 10. #: [m/s] maximum velocity in each dimension MAX_ANG_RATE = 4 * np.pi # : maximum angular velocity MAX_ANG = 1. WIND_MAX = 5. # : maximum gust indensity MIN_QW_BEFORE_HITTING_TERMINAL_STATE = np.cos(30. / 2. * np.pi / 180.) 
wind = np.array([.0, .0, 0.]) #: wind in neutral orientation discount_factor = 0.95 #: discount factor gust_memory = 0.8 domain_fig = None episodeCap = 6000 # model specific parameters from the learned model noise_std = np.array([0.1941, 0.2975, 0.6058, 0.1508, 0.2492, 0.0734]) drag_vel_body = np.array([.18, .43, .49]) drag_ang_rate = np.array([12.78, 10.12, 8.16]) u_coeffs = np.array([33.04, -33.32, 70.54, -42.15]) tail_rotor_side_thrust = -0.54 dt = 0.01 #: length of one timestep continuous_dims = np.arange(20) statespace_limits_full = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 4 + [[-2., 2.]] * 6 + [[0, episodeCap]]) statespace_limits = statespace_limits_full # create all combinations of possible actions _action_bounds = np.array([[-2., 2.]] * 4) # maximum action: 2 _actions_dim = np.array( [[-.2, -0.05, 0.05, 0.2]] * 3 + [[0., 0.15, 0.3, 0.5]]) actions = cartesian(list(_actions_dim)) #: all possible actions actions_num = np.prod(actions.shape[0]) def __init__(self, noise_level=1., discount_factor=0.95): self.noise_level = noise_level self.discount_factor = discount_factor super(HelicopterHoverExtended, self).__init__() def s0(self): self.state = np.zeros((20)) self.state[9] = 1. return self.state.copy(), self.isTerminal(), self.possibleActions() def isTerminal(self): s = self.state if np.any(self.statespace_limits_full[:9, 0] > s[:9]) or np.any(self.statespace_limits_full[:9, 1] < s[:9]): return True if len(s) <= 12: w = np.sqrt(1. - np.sum(s[9:12] ** 2)) else: w = s[9] return np.abs(w) < self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE def _get_reward(self): s = self.state if self.isTerminal(): r = -np.sum(self.statespace_limits[:9, 1] ** 2) #r -= np.sum(self.statespace_limits[10:12, 1] ** 2) r -= (1. - self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE ** 2) return r * (self.episodeCap - s[-1]) else: return -np.sum(s[:9] ** 2) - np.sum(s[10:12] ** 2) def possibleActions(self, s=None): return np.arange(self.actions_num) def step(self, a): a = self.actions[a] # make sure the actions are not beyond their limits a = np.maximum(self._action_bounds[:, 0], np.minimum(a, self._action_bounds[:, 1])) pos, vel, ang_rate, ori_bases, q = self._state_in_world(self.state) t = self.state[-1] gust_noise = self.state[13:19] gust_noise = (self.gust_memory * gust_noise + (1. - self.gust_memory) * self.random_state.randn(6) * self.noise_level * self.noise_std) # update noise which simulates gusts for i in range(10): # Euler integration # position pos += self.dt * vel # compute acceleration on the helicopter vel_body = self._in_world_coord(vel, q) wind_body = self._in_world_coord(self.wind, q) wind_body[-1] = 0. 
# the java implementation # has it this way acc_body = -self.drag_vel_body * (vel_body + wind_body) acc_body[-1] += self.u_coeffs[-1] * a[-1] acc_body[1] += self.tail_rotor_side_thrust acc_body += gust_noise[:3] acc = self._in_body_coord(acc_body, q) acc[-1] += 9.81 # gravity # velocity vel += self.dt * acc # orientation tmp = self.dt * ang_rate qdt = trans.quaternion_about_axis(np.linalg.norm(tmp), tmp) q = trans.quaternion_multiply(q, qdt) #assert np.allclose(1., np.sum(q**2)) # angular accelerations ang_acc = -ang_rate * self.drag_ang_rate + \ self.u_coeffs[:3] * a[:3] ang_acc += gust_noise[3:] ang_rate += self.dt * ang_acc st = np.zeros_like(self.state) st[:3] = -self._in_body_coord(pos, q) st[3:6] = self._in_body_coord(vel, q) st[6:9] = ang_rate st[9:13] = q st[13:19] = gust_noise st[-1] = t + 1 self.state = st.copy() return ( self._get_reward(), st, self.isTerminal(), self.possibleActions() ) def _state_in_world(self, s): """ transforms state from body coordinates in world coordinates .. warning:: angular rate still in body frame! """ pos_body = s[:3] vel_body = s[3:6] ang_rate = s[6:9].copy() q = s[9:13].copy() pos = self._in_world_coord(-pos_body, q) vel = self._in_world_coord(vel_body, q) rot = trans.quaternion_matrix(trans.quaternion_conjugate(q))[:3, :3] return pos, vel, ang_rate, rot, q def _in_body_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ q_pos = np.zeros((4)) q_pos[1:] = p q_p = trans.quaternion_multiply(trans.quaternion_multiply(q, q_pos), trans.quaternion_conjugate(q)) return q_p[1:] def _in_world_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ return self._in_body_coord(p, trans.quaternion_conjugate(q)) def showDomain(self, a=None): s = self.state if a is not None: a = self.actions[a].copy() * 3 # amplify for visualization pos, vel, ang_rate, ori_bases, _ = self._state_in_world(s) coords = np.zeros((3, 3, 2)) + pos[None, :, None] coords[:, :, 1] += ori_bases * 4 u, v = np.mgrid[0:2 * np.pi:10j, 0:2:1.] # rotor coordinates coord = np.zeros([3] + list(u.shape)) coord[0] = .1 * np.sin(u) * v coord[1] = 0. coord[2] = .1 * np.cos(u) * v coord[0] -= 0.8 coord_side = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_side += pos[:, None, None] coord = np.zeros([3] + list(u.shape)) coord[0] = .6 * np.cos(u) * v coord[1] = .6 * np.sin(u) * v coord[2] = -.4 coord_main = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_main += pos[:, None, None] style = dict(fc="r", ec="r", lw=2., head_width=0.05, head_length=0.1) if self.domain_fig is None: self.domain_fig = plt.figure(figsize=(12, 8)) # action axes ax1 = plt.subplot2grid((1, 3), (0, 0), frameon=False) ax1.get_xaxis().set_visible(False) ax1.get_yaxis().set_visible(False) lim = 2 # self.MAX_POS ax1.set_xlim(-lim, lim) ax1.set_ylim(-lim, lim) if a is None: a = np.zeros((4)) # main rotor ax1.add_artist(Circle(np.zeros((2)), radius=0.6)) ax1.add_artist(Ellipse(np.array([0, 1.5]), height=0.3, width=0.02)) # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? 
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) ax1.set_aspect("equal") self.action_arrows = (arr1, arr2, arr3, arr4) self.action_ax = ax1 #ax = self.domain_fig.gca(projection='3d') ax = plt.subplot2grid((1, 3), (0, 1), colspan=2, projection='3d') ax.view_init(elev=np.pi) # print origin x = Arrow3D([0, 2], [0, 0], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="r") y = Arrow3D([0, 0], [0, 2], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="b") z = Arrow3D([0, 0], [0, 0], [0, 2], mutation_scale=30, lw=1, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) # print helicopter coordinate axes x = Arrow3D(*coords[0], mutation_scale=30, lw=2, arrowstyle="-|>", color="r") y = Arrow3D(*coords[1], mutation_scale=30, lw=2, arrowstyle="-|>", color="b") z = Arrow3D(*coords[2], mutation_scale=30, lw=2, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) self.heli_arrows = (x, y, z) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") self._ax = ax ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) plt.show() else: self.heli_arrows[0]._verts3d = tuple(coords[0]) self.heli_arrows[1]._verts3d = tuple(coords[1]) self.heli_arrows[2]._verts3d = tuple(coords[2]) ax = self._ax ax.collections.remove(self._wframe_main) ax.collections.remove(self._wframe_side) for arr in self.action_arrows: self.action_ax.artists.remove(arr) ax1 = self.action_ax # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) self.action_arrows = (arr1, arr2, arr3, arr4) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) self.domain_fig.canvas.draw() class HelicopterHover(HelicopterHoverExtended): """ .. warning:: This domain has an internal hidden state, as it actually is a POMDP. Besides the 12-dimensional observable state, there is an internal state saved as ``self.hidden_state_`` (time and long-term noise which simulated gusts of wind). be aware of this state if you use this class to produce samples which are not in order Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. 
Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 12-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-11: orientation of the world in the heli system as quaterion **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ episodeCap = 6000 MAX_POS = 20. # m MAX_VEL = 10. # m/s MAX_ANG_RATE = 4 * np.pi MAX_ANG = 1. WIND_MAX = 5. continuous_dims = np.arange(12) statespace_limits = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 3) #full_state_ = np.zeros((20)) def s0(self): #self.hidden_state_ = np.zeros((8)) #self.hidden_state_[0] = 1. s_full, term, p_actions = super(HelicopterHover, self).s0() s, _ = self._split_state(s_full) return s, term, p_actions def _split_state(self, s): s_observable = np.zeros((12)) s_observable[:9] = s[:9] s_observable[9:12] = s[10:13] s_hidden = np.zeros((8)) s_hidden[0] = s[9] s_hidden[1:] = s[13:] return s_observable, s_hidden def step(self, a): #s_extended = self._augment_state(s) r, st, term, p_actions = super(HelicopterHover, self).step(a) st, _ = self._split_state(st) return (r, st, term, p_actions)
bsd-3-clause
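The helicopter domain converts vectors between world and body frames with the quaternion sandwich product q (0, p) q*, as in its _in_body_coord / _in_world_coord helpers. Below is a self-contained NumPy sketch of that operation, assuming the [w, x, y, z] quaternion convention used by transformations.py-style helpers; quat_multiply and rotate are illustrative names, not the rlpy API.

import numpy as np

def quat_multiply(q1, q0):
    # Hamilton product of quaternions stored as [w, x, y, z]
    w0, x0, y0, z0 = q0
    w1, x1, y1, z1 = q1
    return np.array([
        w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0,
        w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0,
        w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0,
        w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0,
    ])

def rotate(p, q):
    # rotate 3-vector p by unit quaternion q via q * (0, p) * conj(q)
    q_p = np.r_[0.0, p]
    q_conj = q * np.array([1.0, -1.0, -1.0, -1.0])
    return quat_multiply(quat_multiply(q, q_p), q_conj)[1:]

# sanity check: a 90-degree rotation about z maps the x axis onto the y axis
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(rotate(np.array([1.0, 0.0, 0.0]), q))  # approximately [0, 1, 0]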
harshaneelhg/scikit-learn
sklearn/naive_bayes.py
128
28358
# -*- coding: utf-8 -*- """ The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <vincent.michel@inria.fr> # Minor fixes by Fabian Pedregosa # Amit Aides <amitibo@tx.technion.ac.il> # Yehuda Finkelstein <yehudaf@tx.technion.ac.il> # Lars Buitinck <L.J.Buitinck@uva.nl> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from .base import BaseEstimator, ClassifierMixin from .preprocessing import binarize from .preprocessing import LabelBinarizer from .preprocessing import label_binarize from .utils import check_X_y, check_array from .utils.extmath import safe_sparse_dot, logsumexp from .utils.multiclass import _check_partial_fit_first_call from .utils.fixes import in1d from .utils.validation import check_is_fitted from .externals import six __all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB'] class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape [n_classes, n_samples]. Input is passed to _joint_log_likelihood as-is by predict, predict_proba and predict_log_proba. """ def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(BaseNB): """ Gaussian Naive Bayes (GaussianNB) Can perform online updates to model parameters via `partial_fit` method. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Attributes ---------- class_prior_ : array, shape (n_classes,) probability of each class. class_count_ : array, shape (n_classes,) number of training samples observed in each class. 
theta_ : array, shape (n_classes, n_features) mean of each feature per class sigma_ : array, shape (n_classes, n_features) variance of each feature per class Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ def fit(self, X, y, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight) @staticmethod def _update_mean_variance(n_past, mu, var, X, sample_weight=None): """Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like, shape (number of Gaussians,) Means for Gaussians in original set. var : array-like, shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like, shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like, shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. """ if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) new_mu = np.average(X, axis=0, weights=sample_weight / n_new) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight / n_new) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. 
This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = (old_ssd + new_ssd + (n_past / float(n_new * n_total)) * (n_new * mu - n_new * new_mu) ** 2) total_var = total_ssd / n_total return total_mu, total_var def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight) def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): """Actual implementation of Gaussian NB fitting. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. _refit: bool If true, act as though this were the first time we called _partial_fit (ie, throw away any past fitting and start over). sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) epsilon = 1e-9 if _refit: self.classes_ = None if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_features = X.shape[1] n_classes = len(self.classes_) self.theta_ = np.zeros((n_classes, n_features)) self.sigma_ = np.zeros((n_classes, n_features)) self.class_prior_ = np.zeros(n_classes) self.class_count_ = np.zeros(n_classes) else: if X.shape[1] != self.theta_.shape[1]: msg = "Number of features %d does not match previous data %d." 
raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) # Put epsilon back in each time self.sigma_[:, :] -= epsilon classes = self.classes_ unique_y = np.unique(y) unique_y_in_classes = in1d(unique_y, classes) if not np.all(unique_y_in_classes): raise ValueError("The target label(s) %s in y do not exist in the " "initial classes %s" % (y[~unique_y_in_classes], classes)) for y_i in unique_y: i = classes.searchsorted(y_i) X_i = X[y == y_i, :] if sample_weight is not None: sw_i = sample_weight[y == y_i] N_i = sw_i.sum() else: sw_i = None N_i = X_i.shape[0] new_theta, new_sigma = self._update_mean_variance( self.class_count_[i], self.theta_[i, :], self.sigma_[i, :], X_i, sw_i) self.theta_[i, :] = new_theta self.sigma_[i, :] = new_sigma self.class_count_[i] += N_i self.sigma_[:, :] += epsilon self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_) return self def _joint_log_likelihood(self, X): check_is_fitted(self, "classes_") X = check_array(X) joint_log_likelihood = [] for i in range(np.size(self.classes_)): jointi = np.log(self.class_prior_[i]) n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :])) n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.sigma_[i, :]), 1) joint_log_likelihood.append(jointi + n_ij) joint_log_likelihood = np.array(joint_log_likelihood).T return joint_log_likelihood class BaseDiscreteNB(BaseNB): """Abstract base class for naive Bayes on discrete/categorical data Any estimator based on this class should provide: __init__ _joint_log_likelihood(X) as per BaseNB """ def _update_class_log_prior(self, class_prior=None): n_classes = len(self.classes_) if class_prior is not None: if len(class_prior) != n_classes: raise ValueError("Number of priors must match number of" " classes.") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: # empirical prior, with sample_weight taken into account self.class_log_prior_ = (np.log(self.class_count_) - np.log(self.class_count_.sum())) else: self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes) def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. classes : array-like, shape = [n_classes] List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. 
""" X = check_array(X, accept_sparse='csr', dtype=np.float64) _, n_features = X.shape if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_effective_classes = len(classes) if len(classes) > 1 else 2 self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) elif n_features != self.coef_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (n_features, self.coef_.shape[-1])) Y = label_binarize(y, classes=self.classes_) if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) n_samples, n_classes = Y.shape if X.shape[0] != Y.shape[0]: msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." raise ValueError(msg % (X.shape[0], y.shape[0])) # label_binarize() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently Y = Y.astype(np.float64) if sample_weight is not None: Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas self._count(X, Y) # XXX: OPTIM: we could introduce a public finalization method to # be called by the user explicitly just once after several consecutive # calls to partial_fit and prior any call to predict[_[log_]proba] # to avoid computing the smooth log probas at each call to partial fit self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y, 'csr') _, n_features = X.shape labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X, Y) self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self # XXX The following is a stopgap measure; we need to set the dimensions # of class_log_prior_ and feature_log_prob_ correctly. 
def _get_coef(self): return (self.feature_log_prob_[1:] if len(self.classes_) == 2 else self.feature_log_prob_) def _get_intercept(self): return (self.class_log_prior_[1:] if len(self.classes_) == 2 else self.class_log_prior_) coef_ = property(_get_coef) intercept_ = property(_get_intercept) class MultinomialNB(BaseDiscreteNB): """ Naive Bayes classifier for multinomial models The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. Read more in the :ref:`User Guide <multinomial_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size (n_classes,) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape (n_classes, ) Smoothed empirical log probability for each class. intercept_ : property Mirrors ``class_log_prior_`` for interpreting MultinomialNB as a linear model. feature_log_prob_ : array, shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. coef_ : property Mirrors ``feature_log_prob_`` for interpreting MultinomialNB as a linear model. class_count_ : array, shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(5, size=(6, 100)) >>> y = np.array([1, 2, 3, 4, 5, 6]) >>> from sklearn.naive_bayes import MultinomialNB >>> clf = MultinomialNB() >>> clf.fit(X, y) MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2])) [3] Notes ----- For the rationale behind the names `coef_` and `intercept_`, i.e. naive Bayes as a linear classifier, see J. Rennie et al. (2003), Tackling the poor assumptions of naive Bayes text classifiers, ICML. References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. 
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html """ def __init__(self, alpha=1.0, fit_prior=True, class_prior=None): self.alpha = alpha self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative") self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = smoothed_fc.sum(axis=1) self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) class BernoulliNB(BaseDiscreteNB): """Naive Bayes classifier for multivariate Bernoulli models. Like MultinomialNB, this classifier is suitable for discrete data. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. Read more in the :ref:`User Guide <bernoulli_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). binarize : float or None, optional Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors. fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size=[n_classes,] Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape = [n_classes] Log probability of each class (smoothed). feature_log_prob_ : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). class_count_ : array, shape = [n_classes] Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape = [n_classes, n_features] Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(2, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 4, 5]) >>> from sklearn.naive_bayes import BernoulliNB >>> clf = BernoulliNB() >>> clf.fit(X, Y) BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2])) [3] References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html A. McCallum and K. Nigam (1998). A comparison of event models for naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for Text Categorization, pp. 41-48. V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). 
""" def __init__(self, alpha=1.0, binarize=.0, fit_prior=True, class_prior=None): self.alpha = alpha self.binarize = binarize self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if self.binarize is not None: X = binarize(X, threshold=self.binarize) self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = self.class_count_ + self.alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') if self.binarize is not None: X = binarize(X, threshold=self.binarize) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X.shape if n_features_X != n_features: raise ValueError("Expected input with %d features, got %d instead" % (n_features, n_features_X)) neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) jll += self.class_log_prior_ + neg_prob.sum(axis=1) return jll
bsd-3-clause
mikekestemont/ruzicka
code/04latin_test_o2.py
1
3340
from __future__ import print_function import os import time import json import pickle import sys from itertools import product, combinations import matplotlib import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from ruzicka.utilities import binarize from ruzicka.vectorization import Vectorizer from ruzicka.utilities import load_pan_dataset, train_dev_split, get_vocab_size from sklearn.cross_validation import train_test_split from ruzicka.score_shifting import ScoreShifter from ruzicka.evaluation import pan_metrics from ruzicka.Order2Verifier import Order2Verifier as Verifier import ruzicka.art as art # run script for top-5 metrics ngram_type = 'word' ngram_size = 1 base = 'profile' vector_space = 'tf_std' metric = 'cosine' nb_bootstrap_iter = 100 rnd_prop = 0.5 nb_imposters = 30 mfi = sys.maxint min_df = 2 # get imposter data: train_data, _ = load_pan_dataset('../data/latin/dev') # ignore unknown documents train_labels, train_documents = zip(*train_data) # get test data: test_data, _ = load_pan_dataset('../data/latin/test') # ignore unknown documents test_labels, test_documents = zip(*test_data) # fit encoder for author labels: label_encoder = LabelEncoder() label_encoder.fit(train_labels+test_labels) train_ints = label_encoder.transform(train_labels) test_ints = label_encoder.transform(test_labels) # fit vectorizer: vectorizer = Vectorizer(mfi = mfi, vector_space = vector_space, ngram_type = ngram_type, ngram_size = ngram_size) vectorizer.fit(train_documents+test_documents) train_X = vectorizer.transform(train_documents).toarray() test_X = vectorizer.transform(test_documents).toarray() cols = ['label'] for test_author in sorted(set(test_ints)): auth_label = label_encoder.inverse_transform([test_author])[0] cols.append(auth_label) proba_df = pd.DataFrame(columns=cols) for idx in range(len(test_documents)): target_auth = test_ints[idx] target_docu = test_X[idx] non_target_test_ints = np.array([test_ints[i] for i in range(len(test_ints)) if i != idx]) non_target_test_X = np.array([test_X[i] for i in range(len(test_ints)) if i != idx]) tmp_train_X = np.vstack((train_X, non_target_test_X)) tmp_train_y = np.hstack((train_ints, non_target_test_ints)) tmp_test_X, tmp_test_y = [], [] for t_auth in sorted(set(test_ints)): tmp_test_X.append(target_docu) tmp_test_y.append(t_auth) # fit the verifier: verifier = Verifier(metric = metric, base = base, nb_bootstrap_iter = nb_bootstrap_iter, rnd_prop = rnd_prop) verifier.fit(tmp_train_X, tmp_train_y) probas = verifier.predict_proba(test_X = tmp_test_X, test_y = tmp_test_y, nb_imposters = nb_imposters) row = [label_encoder.inverse_transform([target_auth])[0]] # author label row += list(probas) print(row) proba_df.loc[len(proba_df)] = row proba_df = proba_df.set_index('label') # write away score tables: table_dir = '../output/tables/' if not os.path.isdir(table_dir): os.mkdir(table_dir) proba_df.to_csv(table_dir+'lat_proba_'+metric+'_'+vector_space+'.csv')
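The per-document loop above rebuilds the non-target lists with comprehensions; as an aside (not part of the original experiment code), the same leave-one-document-out selection can be sketched with a boolean mask on dummy arrays (all values hypothetical):

import numpy as np

test_X = np.random.rand(6, 4)                  # dummy document vectors
test_ints = np.array([0, 0, 1, 1, 2, 2])       # dummy author ids
idx = 3                                        # document currently under test

mask = np.arange(len(test_ints)) != idx
non_target_test_X = test_X[mask]               # every test document except the target
non_target_test_ints = test_ints[mask]
assert len(non_target_test_ints) == len(test_ints) - 1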
mit
debsankha/bedtime-programming
ls222/visual-lotka.py
1
5120
#!/usr/bin/env python from math import * import thread import random import time import pygtk pygtk.require("2.0") import gtk import gtk.glade import commands import matplotlib.pyplot class rodent: def __init__(self): self.time_from_last_childbirth=0 class felix: def __init__(self): self.size=0 self.is_virgin=1 self.reproduction_gap=0 self.time_from_last_childbirth=0 self.age=0 # print 'painted' class gui_display: def __init__(self): self.gladefile='./lvshort.glade' self.wTree = gtk.glade.XML(self.gladefile) dic={"on_start_clicked":self.dynamics,"on_mainwin_destroy":gtk.main_quit} self.wTree.signal_autoconnect(dic) self.wTree.get_widget("mainwin").show() self.wTree.get_widget("image").set_from_file("./start.png") def visualize(self,catn,mousen): # while True: num=40 size=10 catno=catn*num**2/(catn+mousen) cats=random.sample(range(num**2),catno) for i in range(num**2): if i in cats: self.dic[i].color=visual.color.red else : self.dic[i].color=visual.color.green def dynamics(self,*args,**kwargs): self.wTree.get_widget("image").set_from_file("./wait.png") print 'dynamics started' mouse_size=20 #ind parameter cat_mature_size=60 #ind parameter # catch_rate=5*10**-4 #parameter # cat_efficiency=0.8 #parameter # a=0.2 #will get from slider # c=0.2 #will get from slider cat_catch_rate=self.wTree.get_widget("catchrate").get_value()*10**-4 #parameter cat_efficiency=self.wTree.get_widget("efficiency").get_value() #parameter a=self.wTree.get_widget("a").get_value() #parameter c=self.wTree.get_widget("c").get_value() #parameter mouse_no=1000 cat_no=1000 t=0 tmax=200 dt=1 timeli=[] miceli=[] catli=[] mice=[rodent() for i in range(mouse_no)] cats=[felix() for i in range(cat_no)] catn=len(cats) mousen=len(mice) self.dic={} num=40 size=10 catno=catn*num**2/(catn+mousen) disp_cats=random.sample(range(num**2),catno) if self.wTree.get_widget("anim").get_active()==1: print 'yay!' for i in range(num**2): coords=((i%num)*size*2-num*size,(i/num)*size*2-num*size) if i in disp_cats: self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red) else : self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green) print self.dic catn=len(cats) mousen=len(mice) data=open('tempdata.dat','w') timestart=time.time() while (len(mice)>0 or len(cats)>0) and t<tmax and (time.time()-timestart)<60: # print time.time()-timestart catn=len(cats) mousen=len(mice) if self.wTree.get_widget("anim").get_active()==1: print 'yay!' 
# self.visualize(catn,mousen) thread.start_new_thread(self.visualize,(catn,mousen)) for mouse in mice: if mouse.time_from_last_childbirth>=1/a: mouse.time_from_last_childbirth=0 mice.append(rodent()) mouse.time_from_last_childbirth+=dt ind=0 while ind<len(cats): cat=cats[ind] cat.age+=dt num=cat_catch_rate*dt*len(mice) for i in range(int(num)): caught=random.randint(0,len(mice)-1) cat.size+=mouse_size*cat_efficiency #size increases mice.pop(caught) if (num-int(num))>random.uniform(0,1): caught=random.randint(0,len(mice)-1) cat.size+=mouse_size*cat_efficiency #size increases mice.pop(caught) if cat.size>cat_mature_size: if cat.is_virgin: cat.is_virgin=0 cat.reproduction_gap=cat.age cats.append(felix()) else : if cat.time_from_last_childbirth>cat.reproduction_gap: cats.append(felix()) cat.time_from_last_childbirth=0 if cat.is_virgin==0: cat.time_from_last_childbirth+=dt if len(cats)>0: if c*dt*2*atan(0.05*len(cats))/pi>random.uniform(0,1): cats.pop(ind) else : ind+=1 else : ind+=1 timeli.append(t) miceli.append(len(mice)) catli.append(len(cats)) print t,'\t',len(mice),'\t',len(cats) print >> data, t,'\t',len(mice),'\t',len(cats) t+=dt data.close() upper_limit=1.2*len(mice) pltfile=open('lv.plt','w') print >> pltfile,"""se te png se o "/tmp/lv.png" unse ke #se yrange [0:%f] se xl "Time" se yl "Number of Prey/Predator" p 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l """%upper_limit pltfile.close() commands.getoutput('gnuplot lv.plt') self.wTree.get_widget("image").set_from_file("/tmp/lv.png") print 'dynamics ended' reload(matplotlib.pyplot) matplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-') matplotlib.pyplot.xlabel("Time") matplotlib.pyplot.ylabel("Number of mice and cats") matplotlib.pyplot.show() gui=gui_display() gtk.main() #dynamics() #import matplotlib.pyplot as plt #plt.plot(timeli,miceli,'go',timeli,catli,'ro') #plt.show()
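For comparison with the agent-based loop above, a rough sketch (not part of the original script) of the deterministic Lotka-Volterra counterpart under explicit Euler stepping; the coefficients are illustrative guesses, not the slider values used in the GUI:

# prey growth a, predation rate b, predator death c, conversion efficiency d (all hypothetical)
a, b, c, d = 0.2, 5e-4, 0.2, 4e-4
mice, cats, dt, tmax = 1000.0, 1000.0, 1.0, 200
history = []
for step in range(int(tmax / dt)):
    dm = a * mice - b * mice * cats            # prey: births minus predation
    dc = d * mice * cats - c * cats            # predators: conversion minus deaths
    mice = max(mice + dm * dt, 0.0)
    cats = max(cats + dc * dt, 0.0)
    history.append((step * dt, mice, cats))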
gpl-3.0
banesullivan/ParaViewGeophysics
PVGeo/ubc/tensor.py
1
21910
__all__ = [ 'TensorMeshReader', 'TensorMeshAppender', 'TopoMeshAppender', ] __displayname__ = 'Tensor Mesh' import os import sys import numpy as np import pandas as pd import vtk from .. import _helpers, interface from ..base import AlgorithmBase from .two_file_base import ModelAppenderBase, ubcMeshReaderBase if sys.version_info < (3,): from StringIO import StringIO else: from io import StringIO class TensorMeshReader(ubcMeshReaderBase): """UBC Mesh 2D/3D models are defined using a 2-file format. The "mesh" file describes how the data is discretized. The "model" file lists the physical property values for all cells in a mesh. A model file is meaningless without an associated mesh file. The reader will automatically detect if the mesh is 2D or 3D and read the remainder of the data with that dimensionality assumption. If the mesh file is 2D, then then model file must also be in the 2D format (same for 3D). Note: Model File is optional. Reader will still construct ``vtkRectilinearGrid`` safely. """ __displayname__ = 'UBC Tensor Mesh Reader' __category__ = 'reader' description = 'PVGeo: UBC Mesh 2D/3D Two-File Format' def __init__(self, nOutputPorts=1, outputType='vtkRectilinearGrid', **kwargs): ubcMeshReaderBase.__init__( self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs ) self.__mesh = vtk.vtkRectilinearGrid() self.__models = [] @staticmethod def place_model_on_mesh(mesh, model, data_name='Data'): """Places model data onto a mesh. This is for the UBC Grid data reaers to associate model data with the mesh grid. Args: mesh (vtkRectilinearGrid): The ``vtkRectilinearGrid`` that is the mesh to place the model data upon. model (np.array): A NumPy float array that holds all of the data to place inside of the mesh's cells. data_name (str) : The name of the model data array once placed on the ``vtkRectilinearGrid``. Return: vtkRectilinearGrid : Returns the input ``vtkRectilinearGrid`` with model data appended. """ if isinstance(model, dict): for key in model.keys(): TensorMeshReader.place_model_on_mesh(mesh, model[key], data_name=key) return mesh # model.GetNumberOfValues() if model is vtkDataArray # Make sure this model file fits the dimensions of the mesh ext = mesh.GetExtent() n1, n2, n3 = ext[1], ext[3], ext[5] if n1 * n2 * n3 < len(model): raise _helpers.PVGeoError( 'Model `%s` has more data than the given mesh has cells to hold.' % data_name ) elif n1 * n2 * n3 > len(model): raise _helpers.PVGeoError( 'Model `%s` does not have enough data to fill the given mesh\'s cells.' % data_name ) # Swap axes because VTK structures the coordinates a bit differently # - This is absolutely crucial! # - Do not play with unless you know what you are doing! if model.ndim > 1 and model.ndim < 3: ncomp = model.shape[1] model = np.reshape(model, (n1, n2, n3, ncomp)) model = np.swapaxes(model, 0, 1) model = np.swapaxes(model, 0, 2) # Now reverse Z axis model = model[::-1, :, :, :] # Note it is in Fortran ordering model = np.reshape(model, (n1 * n2 * n3, ncomp)) else: model = np.reshape(model, (n1, n2, n3)) model = np.swapaxes(model, 0, 1) model = np.swapaxes(model, 0, 2) # Now reverse Z axis model = model[::-1, :, :] # Note it is in Fortran ordering model = model.flatten() # Convert data to VTK data structure and append to output c = interface.convert_array(model, name=data_name, deep=True) # THIS IS CELL DATA! 
Add the model data to CELL data: mesh.GetCellData().AddArray(c) return mesh # ------------------------------------------------------------------# # ---------------------- UBC MESH 2D ------------------------# # ------------------------------------------------------------------# @staticmethod def ubc_mesh_2d(FileName, output): """This method reads a UBC 2D Mesh file and builds an empty ``vtkRectilinearGrid`` for data to be inserted into. `Format Specs`_. .. _Format Specs: http://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/mesh2Dfile.html Args: FileName (str) : The mesh filename as an absolute path for the input mesh file in UBC 3D Mesh Format. output (vtkRectilinearGrid) : The output data object Return: vtkRectilinearGrid : a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid. Mesh is defined by the input mesh file. No data attributes here, simply an empty mesh. Use the ``place_model_on_mesh()`` method to associate with model data. """ # Read in data from file xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubc_mesh_2d_part(FileName) nx = np.sum(np.array(xdisc, dtype=int)) + 1 nz = np.sum(np.array(zdisc, dtype=int)) + 1 # Now generate the vtkRectilinear Grid def _genCoords(pts, disc, z=False): c = [float(pts[0])] for i in range(len(pts) - 1): start = float(pts[i]) stop = float(pts[i + 1]) num = int(disc[i]) w = (stop - start) / num for j in range(1, num): c.append(start + (j) * w) c.append(stop) c = np.array(c, dtype=float) if z: c = -c[::-1] return interface.convert_array(c, deep=True) xcoords = _genCoords(xpts, xdisc) zcoords = _genCoords(zpts, zdisc, z=True) ycoords = interface.convert_array(np.zeros(1), deep=True) output.SetDimensions(nx, 2, nz) # note this subtracts 1 output.SetXCoordinates(xcoords) output.SetYCoordinates(ycoords) output.SetZCoordinates(zcoords) return output @staticmethod def ubc_model_2d(FileName): """Reads a 2D model file and returns a 1D NumPy float array. Use the ``place_model_on_mesh()`` method to associate with a grid. Note: Only supports single component data Args: FileName (str) : The model filename as an absolute path for the input model file in UBCMesh Model Format. Also accepts a list of string file names. Return: np.array : a NumPy float array that holds the model data read from the file. Use the ``place_model_on_mesh()`` method to associate with a grid. If a list of file names is given then it will return a dictionary of NumPy float array with keys as the basenames of the files. """ if isinstance(FileName, (list, tuple)): out = {} for f in FileName: out[os.path.basename(f)] = TensorMeshReader.ubc_model_2d(f) return out dim = np.genfromtxt( FileName, dtype=int, delimiter=None, comments='!', max_rows=1 ) names = ['col%d' % i for i in range(dim[0])] df = pd.read_csv( FileName, names=names, delim_whitespace=True, skiprows=1, comment='!' ) data = df.values if np.shape(data)[0] != dim[1] and np.shape(data)[1] != dim[0]: raise _helpers.PVGeoError('Mode file `%s` improperly formatted.' 
% FileName) return data.flatten(order='F') def __ubc_mesh_data_2d(self, filename_mesh, filename_models, output): """Helper method to read a 2D mesh""" # Construct/read the mesh if self.need_to_readMesh(): TensorMeshReader.ubc_mesh_2d(filename_mesh, self.__mesh) self.need_to_readMesh(flag=False) output.DeepCopy(self.__mesh) if self.need_to_readModels() and self.this_has_models(): self.__models = [] for f in filename_models: # Read the model data self.__models.append(TensorMeshReader.ubc_model_2d(f)) self.need_to_readModels(flag=False) return output # ------------------------------------------------------------------# # ---------------------- UBC MESH 3D ------------------------# # ------------------------------------------------------------------# @staticmethod def ubc_mesh_3d(FileName, output): """This method reads a UBC 3D Mesh file and builds an empty ``vtkRectilinearGrid`` for data to be inserted into. Args: FileName (str) : The mesh filename as an absolute path for the input mesh file in UBC 3D Mesh Format. output (vtkRectilinearGrid) : The output data object Return: vtkRectilinearGrid : a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid. Mesh is defined by the input mesh file. No data attributes here, simply an empty mesh. Use the ``place_model_on_mesh()`` method to associate with model data. """ # --- Read in the mesh ---# fileLines = np.genfromtxt(FileName, dtype=str, delimiter='\n', comments='!') # Get mesh dimensions dim = np.array(fileLines[0].split('!')[0].split(), dtype=int) dim = (dim[0] + 1, dim[1] + 1, dim[2] + 1) # The origin corner (Southwest-top) # - Remember UBC format specifies down as the positive Z # - Easting, Northing, Altitude oo = np.array(fileLines[1].split('!')[0].split(), dtype=float) ox, oy, oz = oo[0], oo[1], oo[2] # Read cell sizes for each line in the UBC mesh files def _readCellLine(line): line_list = [] for seg in line.split(): if '*' in seg: sp = seg.split('*') seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1]) else: seg_arr = np.array([float(seg)], dtype=float) line_list.append(seg_arr) return np.concatenate(line_list) # Read the cell sizes cx = _readCellLine(fileLines[2].split('!')[0]) cy = _readCellLine(fileLines[3].split('!')[0]) cz = _readCellLine(fileLines[4].split('!')[0]) # Invert the indexing of the vector to start from the bottom. 
cz = cz[::-1] # Adjust the reference point to the bottom south west corner oz = oz - np.sum(cz) # Now generate the coordinates from cell width and origin cox = ox + np.cumsum(cx) cox = np.insert(cox, 0, ox) coy = oy + np.cumsum(cy) coy = np.insert(coy, 0, oy) coz = oz + np.cumsum(cz) coz = np.insert(coz, 0, oz) # Set the dims and coordinates for the output output.SetDimensions(dim[0], dim[1], dim[2]) # Convert to VTK array for setting coordinates output.SetXCoordinates(interface.convert_array(cox, deep=True)) output.SetYCoordinates(interface.convert_array(coy, deep=True)) output.SetZCoordinates(interface.convert_array(coz, deep=True)) return output def __ubc_mesh_data_3d(self, filename_mesh, filename_models, output): """Helper method to read a 3D mesh""" # Construct/read the mesh if self.need_to_readMesh(): TensorMeshReader.ubc_mesh_3d(filename_mesh, self.__mesh) self.need_to_readMesh(flag=False) output.DeepCopy(self.__mesh) if self.need_to_readModels() and self.this_has_models(): self.__models = [] for f in filename_models: # Read the model data self.__models.append(TensorMeshReader.ubc_model_3d(f)) self.need_to_readModels(flag=False) return output def __ubc_tensor_mesh(self, filename_mesh, filename_models, output): """Wrapper to Read UBC GIF 2D and 3D meshes. UBC Mesh 2D/3D models are defined using a 2-file format. The "mesh" file describes how the data is discretized. The "model" file lists the physical property values for all cells in a mesh. A model file is meaningless without an associated mesh file. If the mesh file is 2D, then the model file must also be in the 2D format (same for 3D). Args: filename_mesh (str) : The mesh filename as an absolute path for the input mesh file in UBC 2D/3D Mesh Format filename_models (str or list(str)) : The model filename(s) as an absolute path for the input model file in UBC 2D/3D Model Format. output (vtkRectilinearGrid) : The output data object Return: vtkRectilinearGrid : a ``vtkRectilinearGrid`` generated from the UBC 2D/3D Mesh grid. Mesh is defined by the input mesh file. Cell data is defined by the input model file. 
""" # Check if the mesh is a UBC 2D mesh if self.is_2d(): self.__ubc_mesh_data_2d(filename_mesh, filename_models, output) # Check if the mesh is a UBC 3D mesh elif self.is_3d(): self.__ubc_mesh_data_3d(filename_mesh, filename_models, output) else: raise _helpers.PVGeoError('File format not recognized') return output def RequestData(self, request, inInfo, outInfo): """Handles data request by the pipeline.""" # Get output: output = self.GetOutputData(outInfo, 0) # Get requested time index i = _helpers.get_requested_time(self, outInfo) self.__ubc_tensor_mesh( self.get_mesh_filename(), self.get_model_filenames(), output ) # Place the model data for given timestep onto the mesh if len(self.__models) > i: TensorMeshReader.place_model_on_mesh( output, self.__models[i], self.get_data_name() ) return 1 def RequestInformation(self, request, inInfo, outInfo): """Handles info request by pipeline about timesteps and grid extents.""" # Call parent to handle time stuff ubcMeshReaderBase.RequestInformation(self, request, inInfo, outInfo) # Now set whole output extent if self.need_to_readMesh(): ext = self._read_extent() info = outInfo.GetInformationObject(0) # Set WHOLE_EXTENT: This is absolutely necessary info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6) return 1 def clear_mesh(self): """Use to clean/rebuild the mesh""" self.__mesh = vtk.vtkRectilinearGrid() ubcMeshReaderBase.clear_models(self) def clear_models(self): """Use to clean the models and reread""" self.__models = [] ubcMeshReaderBase.clear_models(self) ############################################################################### class TensorMeshAppender(ModelAppenderBase): """This filter reads a timeseries of models and appends it to an input ``vtkRectilinearGrid`` """ __displayname__ = 'UBC Tensor Mesh Appender' __category__ = 'filter' def __init__(self, **kwargs): ModelAppenderBase.__init__( self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs ) def _read_up_front(self): """Internal helepr to read data at start""" reader = ubcMeshReaderBase.ubc_model_3d if not self._is_3D: # Note how in UBC format, 2D grids are specified on an XZ plane (no Y component) # This will only work prior to rotations to account for real spatial reference reader = TensorMeshReader.ubc_model_2d self._models = [] for f in self._model_filenames: # Read the model data self._models.append(reader(f)) self.need_to_read(flag=False) return def _place_on_mesh(self, output, idx=0): """Internal helepr to place a model on the mesh for a given index""" TensorMeshReader.place_model_on_mesh( output, self._models[idx], self.get_data_name() ) return ############################################################################### class TopoMeshAppender(AlgorithmBase): """This filter reads a single discrete topography file and appends it as a boolean data array. """ __displayname__ = 'Append UBC Discrete Topography' __category__ = 'filter' def __init__( self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs ): AlgorithmBase.__init__( self, nInputPorts=1, inputType=inputType, nOutputPorts=1, outputType=outputType, ) self._topoFileName = kwargs.get('filename', None) self.__indices = None self.__need_to_read = True self.__ne, self.__nn = None, None def need_to_read(self, flag=None): """Ask self if the reader needs to read the files again Args: flag (bool): if the flag is set then this method will set the read status Return: bool: The status of the reader aspect of the filter. 
""" if flag is not None and isinstance(flag, (bool, int)): self.__need_to_read = flag return self.__need_to_read def Modified(self, read_again=True): """Call modified if the files needs to be read again again.""" if read_again: self.__need_to_read = read_again AlgorithmBase.Modified(self) def modified(self, read_again=True): """Call modified if the files needs to be read again again.""" return self.Modified(read_again=read_again) def _read_up_front(self): """Internal helepr to read data at start""" # Read the file content = np.genfromtxt( self._topoFileName, dtype=str, delimiter='\n', comments='!' ) dim = content[0].split() self.__ne, self.__nn = int(dim[0]), int(dim[1]) self.__indices = pd.read_csv( StringIO("\n".join(content[1::])), names=['i', 'j', 'k'], delim_whitespace=True, ) # NOTE: K indices are inverted self.need_to_read(flag=False) return def _place_on_mesh(self, output): """Internal helepr to place an active cells model on the mesh""" # Check mesh extents to math topography nx, ny, nz = output.GetDimensions() nx, ny, nz = nx - 1, ny - 1, nz - 1 # because GetDimensions counts the nodes topz = np.max(self.__indices['k']) + 1 if nx != self.__nn or ny != self.__ne or topz > nz: raise _helpers.PVGeoError( 'Dimension mismatch between input grid and topo file.' ) # # Adjust the k indices to be in caarteian system # self.__indices['k'] = nz - self.__indices['k'] # Fill out the topo and add it as model as it will be in UBC format # Create a 3D array of 1s and zeros (1 means beneath topo or active) topo = np.empty((ny, nx, nz), dtype=float) topo[:] = np.nan for row in self.__indices.values: i, j, k = row topo[i, j, k + 1 :] = 0 topo[i, j, : k + 1] = 1 # Add as model... ``place_model_on_mesh`` handles the rest TensorMeshReader.place_model_on_mesh( output, topo.flatten(), 'Active Topography' ) return def RequestData(self, request, inInfo, outInfo): """Used by pipeline to generate output""" # Get input/output of Proxy pdi = self.GetInputData(inInfo, 0, 0) output = self.GetOutputData(outInfo, 0) output.DeepCopy(pdi) # ShallowCopy if you want changes to propagate upstream # Perfrom task: if self.__need_to_read: self._read_up_front() # Place the model data for given timestep onto the mesh self._place_on_mesh(output) return 1 #### Setters and Getters #### def clear_topo_file(self): """Use to clear data file name.""" self._topoFileName = None self.Modified(read_again=True) def set_topo_filename(self, filename): """Use to set the file names for the reader. Handles single strings only""" if filename is None: return # do nothing if None is passed by a constructor on accident elif isinstance(filename, str) and self._topoFileName != filename: self._topoFileName = filename self.Modified() return 1 ############################################################################### # # import numpy as np # indices = np.array([[0,0,1], # [0,1,1], # [0,2,1], # [1,0,1], # [1,1,1], # [1,2,1], # [2,0,1], # [2,1,1], # [2,2,1], # ]) # # topo = np.empty((3,3,3), dtype=float) # topo[:] = np.nan # # for row in indices: # i, j, k = row # topo[i, j, k:] = 0 # topo[i, j, :k] = 1 # topo
bsd-3-clause
tapomayukh/projects_in_python
rapid_categorization/haptic_map/outlier/hmm_crossvalidation_force.py
1
19066
# Hidden Markov Model Implementation import pylab as pyl import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle import unittest import ghmm import ghmmwrapper import random import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_length') from data_variable_length_force import Fmat_original if __name__ == '__main__' or __name__ != '__main__': print "Inside outlier HMM model training file" Fmat = Fmat_original # Getting mean / covariance i = 0 number_states = 10 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 35): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 0: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] #print np.shape(state_1) #print np.shape(feature_1_final_data[j]) feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_rf_force = np.zeros((number_states,1)) sigma_rf = np.zeros((number_states,1)) while (j < number_states): mu_rf_force[j] = np.mean(feature_1_final_data[j]) sigma_rf[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 35 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 70): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 35: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_rm_force = np.zeros((number_states,1)) sigma_rm = np.zeros((number_states,1)) while (j < number_states): mu_rm_force[j] = np.mean(feature_1_final_data[j]) sigma_rm[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 70 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 105): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 70: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_sf_force = np.zeros((number_states,1)) sigma_sf = np.zeros((number_states,1)) while (j < number_states): mu_sf_force[j] = np.mean(feature_1_final_data[j]) sigma_sf[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 105 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 140): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 105: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] 
= feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_sm_force = np.zeros((number_states,1)) sigma_sm = np.zeros((number_states,1)) while (j < number_states): mu_sm_force[j] = np.mean(feature_1_final_data[j]) sigma_sm[j] = scp.std(feature_1_final_data[j]) j = j+1 # HMM - Implementation: # 10 Hidden States # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch) # For new objects, it is classified according to which model it represenst the closest.. F = ghmm.Float() # emission domain of this model # A - Transition Matrix if number_states == 3: A = [[0.2, 0.5, 0.3], [0.0, 0.5, 0.5], [0.0, 0.0, 1.0]] elif number_states == 5: A = [[0.2, 0.35, 0.2, 0.15, 0.1], [0.0, 0.2, 0.45, 0.25, 0.1], [0.0, 0.0, 0.2, 0.55, 0.25], [0.0, 0.0, 0.0, 0.2, 0.8], [0.0, 0.0, 0.0, 0.0, 1.0]] elif number_states == 10: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] elif number_states == 15: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]] elif number_states == 20: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 
0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma) B_rf = [0.0]*number_states B_rm = [0.0]*number_states B_sf = [0.0]*number_states B_sm = [0.0]*number_states for num_states in range(number_states): B_rf[num_states] = [mu_rf_force[num_states][0],sigma_rf[num_states][0]] B_rm[num_states] = [mu_rm_force[num_states][0],sigma_rm[num_states][0]] B_sf[num_states] = [mu_sf_force[num_states][0],sigma_sf[num_states][0]] B_sm[num_states] = [mu_sm_force[num_states][0],sigma_sm[num_states][0]] #print B_sm #print mu_sm_motion # pi - initial probabilities per state if number_states == 3: pi = [1./3.] * 3 elif number_states == 5: pi = [0.2] * 5 elif number_states == 10: pi = [0.1] * 10 elif number_states == 15: pi = [1./15.] 
* 15 elif number_states == 20: pi = [0.05] * 20 # generate RF, RM, SF, SM models from parameters model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained trial_number = 1 rf_final = np.matrix(np.zeros((28,1))) rm_final = np.matrix(np.zeros((28,1))) sf_final = np.matrix(np.zeros((28,1))) sm_final = np.matrix(np.zeros((28,1))) total_seq = Fmat for i in range(140): total_seq[i][:] = sum(total_seq[i][:],[]) while (trial_number < 6): # For Training if (trial_number == 1): j = 5 total_seq_rf = total_seq[1:5] total_seq_rm = total_seq[36:40] total_seq_sf = total_seq[71:75] total_seq_sm = total_seq[106:110] #print total_seq_rf while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+1:j+5] total_seq_rm = total_seq_rm+total_seq[j+36:j+40] total_seq_sf = total_seq_sf+total_seq[j+71:j+75] total_seq_sm = total_seq_sm+total_seq[j+106:j+110] j = j+5 if (trial_number == 2): j = 5 total_seq_rf = [total_seq[0]]+total_seq[2:5] total_seq_rm = [total_seq[35]]+total_seq[37:40] total_seq_sf = [total_seq[70]]+total_seq[72:75] total_seq_sm = [total_seq[105]]+total_seq[107:110] #print total_seq_rf while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+0]]+total_seq[j+2:j+5] total_seq_rm = total_seq_rm+[total_seq[j+35]]+total_seq[j+37:j+40] total_seq_sf = total_seq_sf+[total_seq[j+70]]+total_seq[j+72:j+75] total_seq_sm = total_seq_sm+[total_seq[j+105]]+total_seq[j+107:j+110] j = j+5 if (trial_number == 3): j = 5 total_seq_rf = total_seq[0:2]+total_seq[3:5] total_seq_rm = total_seq[35:37]+total_seq[38:40] total_seq_sf = total_seq[70:72]+total_seq[73:75] total_seq_sm = total_seq[105:107]+total_seq[108:110] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+2]+total_seq[j+3:j+5] total_seq_rm = total_seq_rm+total_seq[j+35:j+37]+total_seq[j+38:j+40] total_seq_sf = total_seq_sf+total_seq[j+70:j+72]+total_seq[j+73:j+75] total_seq_sm = total_seq_sm+total_seq[j+105:j+107]+total_seq[j+108:j+110] j = j+5 if (trial_number == 4): j = 5 total_seq_rf = total_seq[0:3]+total_seq[4:5] total_seq_rm = total_seq[35:38]+total_seq[39:40] total_seq_sf = total_seq[70:73]+total_seq[74:75] total_seq_sm = total_seq[105:108]+total_seq[109:110] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+3]+total_seq[j+4:j+5] total_seq_rm = total_seq_rm+total_seq[j+35:j+38]+total_seq[j+39:j+40] total_seq_sf = total_seq_sf+total_seq[j+70:j+73]+total_seq[j+74:j+75] total_seq_sm = total_seq_sm+total_seq[j+105:j+108]+total_seq[j+109:j+110] j = j+5 if (trial_number == 5): j = 5 total_seq_rf = total_seq[0:4] total_seq_rm = total_seq[35:39] total_seq_sf = total_seq[70:74] total_seq_sm = total_seq[105:109] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+4] total_seq_rm = total_seq_rm+total_seq[j+35:j+39] total_seq_sf = total_seq_sf+total_seq[j+70:j+74] total_seq_sm = total_seq_sm+total_seq[j+105:j+109] j = j+5 train_seq_rf = total_seq_rf train_seq_rm = total_seq_rm train_seq_sf = total_seq_sf train_seq_sm = total_seq_sm #print train_seq_rf[27] final_ts_rf = ghmm.SequenceSet(F,train_seq_rf) final_ts_rm = ghmm.SequenceSet(F,train_seq_rm) final_ts_sf = ghmm.SequenceSet(F,train_seq_sf) final_ts_sm = ghmm.SequenceSet(F,train_seq_sm) model_rf.baumWelch(final_ts_rf) model_rm.baumWelch(final_ts_rm) 
model_sf.baumWelch(final_ts_sf) model_sm.baumWelch(final_ts_sm) # For Testing if (trial_number == 1): j = 5 total_seq_rf = [total_seq[0]] total_seq_rm = [total_seq[35]] total_seq_sf = [total_seq[70]] total_seq_sm = [total_seq[105]] #print np.shape(total_seq_rf) while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j]] total_seq_rm = total_seq_rm+[total_seq[j+35]] total_seq_sf = total_seq_sf+[total_seq[j+70]] total_seq_sm = total_seq_sm+[total_seq[j+105]] j = j+5 if (trial_number == 2): j = 5 total_seq_rf = [total_seq[1]] total_seq_rm = [total_seq[36]] total_seq_sf = [total_seq[71]] total_seq_sm = [total_seq[106]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+1]] total_seq_rm = total_seq_rm+[total_seq[j+36]] total_seq_sf = total_seq_sf+[total_seq[j+71]] total_seq_sm = total_seq_sm+[total_seq[j+106]] j = j+5 if (trial_number == 3): j = 5 total_seq_rf = [total_seq[2]] total_seq_rm = [total_seq[37]] total_seq_sf = [total_seq[72]] total_seq_sm = [total_seq[107]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+2]] total_seq_rm = total_seq_rm+[total_seq[j+37]] total_seq_sf = total_seq_sf+[total_seq[j+72]] total_seq_sm = total_seq_sm+[total_seq[j+107]] j = j+5 if (trial_number == 4): j = 5 total_seq_rf = [total_seq[3]] total_seq_rm = [total_seq[38]] total_seq_sf = [total_seq[73]] total_seq_sm = [total_seq[108]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+3]] total_seq_rm = total_seq_rm+[total_seq[j+38]] total_seq_sf = total_seq_sf+[total_seq[j+73]] total_seq_sm = total_seq_sm+[total_seq[j+108]] j = j+5 if (trial_number == 5): j = 5 total_seq_rf = [total_seq[4]] total_seq_rm = [total_seq[39]] total_seq_sf = [total_seq[74]] total_seq_sm = [total_seq[109]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+4]] total_seq_rm = total_seq_rm+[total_seq[j+39]] total_seq_sf = total_seq_sf+[total_seq[j+74]] total_seq_sm = total_seq_sm+[total_seq[j+109]] j = j+5 trial_number = trial_number + 1 print "Outlier HMM model trained"
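As an aside (not part of the original experiment), the per-state mean/std estimation performed by the long while-loops above can be expressed compactly; this sketch works on a single dummy force profile and omits the pooling over the 35 trials per category, assuming equal-length chunks per hidden state:

import numpy as np

number_states = 10
profile = np.random.rand(230)                            # dummy force sequence
sample_length = len(profile) // number_states
chunks = profile[:sample_length * number_states].reshape(number_states, sample_length)
mu = chunks.mean(axis=1)                                 # per-state emission mean
sigma = chunks.std(axis=1)                               # per-state emission std
B = [[m, s] for m, s in zip(mu, sigma)]                  # (mu, sigma) pairs as fed to the ghmm emissions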
mit
Titan-C/scikit-learn
examples/cluster/plot_ward_structured_vs_unstructured.py
1
3369
""" =========================================================== Hierarchical clustering: structured vs unstructured ward =========================================================== Example builds a swiss roll dataset and runs hierarchical clustering on their position. For more information, see :ref:`hierarchical_clustering`. In a first step, the hierarchical clustering is performed without connectivity constraints on the structure and is solely based on distance, whereas in a second step the clustering is restricted to the k-Nearest Neighbors graph: it's a hierarchical clustering with structure prior. Some of the clusters learned without connectivity constraints do not respect the structure of the swiss roll and extend across different folds of the manifolds. On the opposite, when opposing connectivity constraints, the clusters form a nice parcellation of the swiss roll. """ # Authors : Vincent Michel, 2010 # Alexandre Gramfort, 2010 # Gael Varoquaux, 2010 # License: BSD 3 clause print(__doc__) import time as time import numpy as np import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from sklearn.cluster import AgglomerativeClustering from sklearn.datasets.samples_generator import make_swiss_roll # ############################################################################# # Generate data (swiss roll dataset) n_samples = 1500 noise = 0.05 X, _ = make_swiss_roll(n_samples, noise) # Make it thinner X[:, 1] *= .5 # ############################################################################# # Compute clustering print("Compute unstructured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) # ############################################################################# # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(np.float(l) / np.max(label + 1))) plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time) # ############################################################################# # Define the structure A of the data. Here a 10 nearest neighbors from sklearn.neighbors import kneighbors_graph connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False) # ############################################################################# # Compute clustering print("Compute structured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) # ############################################################################# # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(float(l) / np.max(label + 1))) plt.title('With connectivity constraints (time %.2fs)' % elapsed_time) plt.show()
bsd-3-clause
imaculate/scikit-learn
sklearn/linear_model/randomized_l1.py
11
24849
""" Randomized Lasso/Logistic: feature selection based on Lasso and sparse Logistic Regression """ # Author: Gael Varoquaux, Alexandre Gramfort # # License: BSD 3 clause import itertools from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy.sparse import issparse from scipy import sparse from scipy.interpolate import interp1d from .base import _preprocess_data from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.joblib import Memory, Parallel, delayed from ..utils import (as_float_array, check_random_state, check_X_y, check_array, safe_mask) from ..utils.validation import check_is_fitted from .least_angle import lars_path, LassoLarsIC from .logistic import LogisticRegression from ..exceptions import ConvergenceWarning ############################################################################### # Randomized linear model: feature selection def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200, n_jobs=1, verbose=False, pre_dispatch='3*n_jobs', random_state=None, sample_fraction=.75, **params): random_state = check_random_state(random_state) # We are generating 1 - weights, and not weights n_samples, n_features = X.shape if not (0 < scaling < 1): raise ValueError( "'scaling' should be between 0 and 1. Got %r instead." % scaling) scaling = 1. - scaling scores_ = 0.0 for active_set in Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)( delayed(estimator_func)( X, y, weights=scaling * random_state.randint( 0, 2, size=(n_features,)), mask=(random_state.rand(n_samples) < sample_fraction), verbose=max(0, verbose - 1), **params) for _ in range(n_resampling)): scores_ += active_set scores_ /= n_resampling return scores_ class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class to implement randomized linear models for feature selection This implements the strategy by Meinshausen and Buhlman: stability selection with randomized sampling, and random re-weighting of the penalty. """ @abstractmethod def __init__(self): pass _preprocess_data = staticmethod(_preprocess_data) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. Returns ------- self : object Returns an instance of self. 
""" X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True, ensure_min_samples=2, estimator=self) X = as_float_array(X, copy=False) n_samples, n_features = X.shape X, y, X_offset, y_offset, X_scale = \ self._preprocess_data(X, y, self.fit_intercept, self.normalize) estimator_func, params = self._make_estimator_and_params(X, y) memory = self.memory if isinstance(memory, six.string_types): memory = Memory(cachedir=memory) scores_ = memory.cache( _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'] )( estimator_func, X, y, scaling=self.scaling, n_resampling=self.n_resampling, n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch, random_state=self.random_state, sample_fraction=self.sample_fraction, **params) if scores_.ndim == 1: scores_ = scores_[:, np.newaxis] self.all_scores_ = scores_ self.scores_ = np.max(self.all_scores_, axis=1) return self def _make_estimator_and_params(self, X, y): """Return the parameters passed to the estimator""" raise NotImplementedError def get_support(self, indices=False): """Return a mask, or list, of the features/indices selected.""" check_is_fitted(self, 'scores_') mask = self.scores_ > self.selection_threshold return mask if not indices else np.where(mask)[0] # XXX: the two function below are copy/pasted from feature_selection, # Should we add an intermediate base class? def transform(self, X): """Transform a new matrix using the selected features""" mask = self.get_support() X = check_array(X) if len(mask) != X.shape[1]: raise ValueError("X has a different shape than during fitting.") return check_array(X)[:, safe_mask(X, mask)] def inverse_transform(self, X): """Transform a new matrix using the selected features""" support = self.get_support() if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size)) Xt[:, support] = X return Xt ############################################################################### # Randomized lasso: regression settings def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False, precompute=False, eps=np.finfo(np.float).eps, max_iter=500): X = X[safe_mask(X, mask)] y = y[mask] # Center X and y to avoid fit the intercept X -= X.mean(axis=0) y -= y.mean() alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64)) X = (1 - weights) * X with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas_, _, coef_ = lars_path(X, y, Gram=precompute, copy_X=False, copy_Gram=False, alpha_min=np.min(alpha), method='lasso', verbose=verbose, max_iter=max_iter, eps=eps) if len(alpha) > 1: if len(alphas_) > 1: # np.min(alpha) < alpha_min interpolator = interp1d(alphas_[::-1], coef_[:, ::-1], bounds_error=False, fill_value=0.) scores = (interpolator(alpha) != 0.0) else: scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool) else: scores = coef_[:, -1] != 0.0 return scores class RandomizedLasso(BaseRandomizedLinearModel): """Randomized Lasso. Randomized Lasso works by subsampling the training data and computing a Lasso estimate where the penalty of a random subset of coefficients has been scaled. By performing this double randomization several times, the method assigns high scores to features that are repeatedly selected across randomizations. This is known as stability selection. In short, features selected more often are considered good features. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- alpha : float, 'aic', or 'bic', optional The regularization parameter alpha parameter in the Lasso. 
Warning: this is not the alpha parameter in the stability selection article which is scaling. scaling : float, optional The s parameter used to randomly scale the penalty of different features (See :ref:`User Guide <randomized_l1>` for details ). Should be between 0 and 1. sample_fraction : float, optional The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional Number of randomized models. selection_threshold: float, optional The score above which features should be selected. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learned more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. precompute : True | False | 'auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to 'auto' let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform in the Lars algorithm. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the 'tol' parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max of \ ``all_scores_``. 
Examples -------- >>> from sklearn.linear_model import RandomizedLasso >>> randomized_lasso = RandomizedLasso() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLogisticRegression, Lasso, ElasticNet """ def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.alpha = alpha self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.precompute = precompute self.eps = eps self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): assert self.precompute in (True, False, None, 'auto') alpha = self.alpha if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'): model = LassoLarsIC(precompute=self.precompute, criterion=self.alpha, max_iter=self.max_iter, eps=self.eps) model.fit(X, y) self.alpha_ = alpha = model.alpha_ return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter, eps=self.eps, precompute=self.precompute) ############################################################################### # Randomized logistic: classification settings def _randomized_logistic(X, y, weights, mask, C=1., verbose=False, fit_intercept=True, tol=1e-3): X = X[safe_mask(X, mask)] y = y[mask] if issparse(X): size = len(weights) weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size)) X = X * weight_dia else: X *= (1 - weights) C = np.atleast_1d(np.asarray(C, dtype=np.float64)) scores = np.zeros((X.shape[1], len(C)), dtype=np.bool) for this_C, this_scores in zip(C, scores.T): # XXX : would be great to do it with a warm_start ... clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False, fit_intercept=fit_intercept) clf.fit(X, y) this_scores[:] = np.any( np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0) return scores class RandomizedLogisticRegression(BaseRandomizedLinearModel): """Randomized Logistic Regression Randomized Logistic Regression works by subsampling the training data and fitting a L1-penalized LogisticRegression model where the penalty of a random subset of coefficients has been scaled. By performing this double randomization several times, the method assigns high scores to features that are repeatedly selected across randomizations. This is known as stability selection. In short, features selected more often are considered good features. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- C : float, optional, default=1 The regularization parameter C in the LogisticRegression. scaling : float, optional, default=0.5 The s parameter used to randomly scale the penalty of different features (See :ref:`User Guide <randomized_l1>` for details ). Should be between 0 and 1. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. 
If 1, all samples are used. n_resampling : int, optional, default=200 Number of randomized models. selection_threshold : float, optional, default=0.25 The score above which features should be selected. fit_intercept : boolean, optional, default=True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. tol : float, optional, default=1e-3 tolerance for stopping criteria of LogisticRegression n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max \ of ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLogisticRegression >>> randomized_logistic = RandomizedLogisticRegression() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLasso, LogisticRegression """ def __init__(self, C=1, scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, tol=1e-3, fit_intercept=True, verbose=False, normalize=True, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.C = C self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.tol = tol self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): params = dict(C=self.C, tol=self.tol, fit_intercept=self.fit_intercept) return _randomized_logistic, params def _preprocess_data(self, X, y, fit_intercept, normalize=False): """Center the data in X but not in y""" X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept, normalize=normalize) return X, y, X_offset, y, X_scale ############################################################################### # Stability paths def _lasso_stability_path(X, y, mask, weights, eps): "Inner loop of lasso_stability_path" X = X * weights[np.newaxis, :] X = X[safe_mask(X, mask), :] y = y[mask] alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0] alpha_min = eps * alpha_max # set for early stopping in path with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False, alpha_min=alpha_min) # Scale alpha by alpha_max alphas /= alphas[0] # Sort alphas in ascending order alphas = alphas[::-1] coefs = coefs[:, ::-1] # Get rid of the alphas that are too small mask = alphas >= eps # We also want to keep the first one: it should be close to the OLS # solution mask[0] = True alphas = alphas[mask] coefs = coefs[:, mask] return alphas, coefs def lasso_stability_path(X, y, scaling=0.5, random_state=None, n_resampling=200, n_grid=100, sample_fraction=0.75, eps=4 * np.finfo(np.float).eps, n_jobs=1, verbose=False): """Stability path based on randomized Lasso estimates Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- X : array-like, shape = [n_samples, n_features] training data. y : array-like, shape = [n_samples] target values. scaling : float, optional, default=0.5 The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. random_state : integer or numpy.random.RandomState, optional The generator used to randomize the design. n_resampling : int, optional, default=200 Number of randomized models. n_grid : int, optional, default=100 Number of grid points. The path is linearly reinterpolated on a grid between 0 and 1 before computing the scores. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. eps : float, optional Smallest value of alpha / alpha_max considered n_jobs : integer, optional Number of CPUs to use during the resampling.
If '-1', use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Returns ------- alphas_grid : array, shape ~ [n_grid] The grid points between 0 and 1: alpha/alpha_max scores_path : array, shape = [n_features, n_grid] The scores for each feature along the path. Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. """ rng = check_random_state(random_state) if not (0 < scaling < 1): raise ValueError("Parameter 'scaling' should be between 0 and 1." " Got %r instead." % scaling) n_samples, n_features = X.shape paths = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_lasso_stability_path)( X, y, mask=rng.rand(n_samples) < sample_fraction, weights=1. - scaling * rng.randint(0, 2, size=(n_features,)), eps=eps) for k in range(n_resampling)) all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths])))) # Take approximately n_grid values stride = int(max(1, int(len(all_alphas) / float(n_grid)))) all_alphas = all_alphas[::stride] if not all_alphas[-1] == 1: all_alphas.append(1.) all_alphas = np.array(all_alphas) scores_path = np.zeros((n_features, len(all_alphas))) for alphas, coefs in paths: if alphas[0] != 0: alphas = np.r_[0, alphas] coefs = np.c_[np.ones((n_features, 1)), coefs] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] coefs = np.c_[coefs, np.zeros((n_features, 1))] scores_path += (interp1d(alphas, coefs, kind='nearest', bounds_error=False, fill_value=0, axis=-1)(all_alphas) != 0) scores_path /= n_resampling return all_alphas, scores_path
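# A hedged usage sketch (not part of the original module): how the estimators
# defined above are typically driven, assuming a scikit-learn version that still
# ships RandomizedLasso and lasso_stability_path. The synthetic data below is
# illustrative only.
#
#     import numpy as np
#     from sklearn.linear_model import RandomizedLasso, lasso_stability_path
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 20)
#     y = X[:, 0] + 0.5 * X[:, 1] + 0.01 * rng.randn(200)
#
#     rlasso = RandomizedLasso(alpha='aic', random_state=0).fit(X, y)
#     stable_idx = rlasso.get_support(indices=True)  # indices of stable features
#
#     alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0)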
bsd-3-clause
jtwhite79/pyemu
pyemu/utils/gw_utils.py
1
110032
"""MODFLOW support utilities""" import os from datetime import datetime import shutil import warnings import numpy as np import pandas as pd import re pd.options.display.max_colwidth = 100 from pyemu.pst.pst_utils import ( SFMT, IFMT, FFMT, pst_config, parse_tpl_file, try_process_output_file, ) from pyemu.utils.os_utils import run from pyemu.utils.helpers import _write_df_tpl from ..pyemu_warnings import PyemuWarning PP_FMT = { "name": SFMT, "x": FFMT, "y": FFMT, "zone": IFMT, "tpl": SFMT, "parval1": FFMT, } PP_NAMES = ["name", "x", "y", "zone", "parval1"] def modflow_pval_to_template_file(pval_file, tpl_file=None): """write a template file for a modflow parameter value file. Args: pval_file (`str`): the path and name of the existing modflow pval file tpl_file (`str`, optional): template file to write. If None, use `pval_file` +".tpl". Default is None Note: Uses names in the first column in the pval file as par names. Returns: **pandas.DataFrame**: a dataFrame with control file parameter information """ if tpl_file is None: tpl_file = pval_file + ".tpl" pval_df = pd.read_csv( pval_file, delim_whitespace=True, header=None, skiprows=2, names=["parnme", "parval1"], ) pval_df.index = pval_df.parnme pval_df.loc[:, "tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x)) with open(tpl_file, "w") as f: f.write("ptf ~\n#pval template file from pyemu\n") f.write("{0:10d} #NP\n".format(pval_df.shape[0])) f.write( pval_df.loc[:, ["parnme", "tpl"]].to_string( col_space=0, formatters=[SFMT, SFMT], index=False, header=False, justify="left", ) ) return pval_df def modflow_hob_to_instruction_file(hob_file, ins_file=None): """write an instruction file for a modflow head observation file Args: hob_file (`str`): the path and name of the existing modflow hob file ins_file (`str`, optional): the name of the instruction file to write. If `None`, `hob_file` +".ins" is used. Default is `None`. Returns: **pandas.DataFrame**: a dataFrame with control file observation information """ hob_df = pd.read_csv( hob_file, delim_whitespace=True, skiprows=1, header=None, names=["simval", "obsval", "obsnme"], ) hob_df.loc[:, "obsnme"] = hob_df.obsnme.apply(str.lower) hob_df.loc[:, "ins_line"] = hob_df.obsnme.apply(lambda x: "l1 !{0:s}!".format(x)) hob_df.loc[0, "ins_line"] = hob_df.loc[0, "ins_line"].replace("l1", "l2") if ins_file is None: ins_file = hob_file + ".ins" f_ins = open(ins_file, "w") f_ins.write("pif ~\n") f_ins.write( hob_df.loc[:, ["ins_line"]].to_string( col_space=0, columns=["ins_line"], header=False, index=False, formatters=[SFMT], ) + "\n" ) hob_df.loc[:, "weight"] = 1.0 hob_df.loc[:, "obgnme"] = "obgnme" f_ins.close() return hob_df def modflow_hydmod_to_instruction_file(hydmod_file, ins_file=None): """write an instruction file for a modflow hydmod file Args: hydmod_file (`str`): the path and name of the existing modflow hob file ins_file (`str`, optional): the name of the instruction file to write. If `None`, `hydmod_file` +".ins" is used. Default is `None`. 
Returns: ------- **pandas.DataFrame**: a dataFrame with control file observation information Note: calls `pyemu.gw_utils.modflow_read_hydmod_file()` """ hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file) hydmod_df.loc[:, "ins_line"] = hydmod_df.obsnme.apply( lambda x: "l1 w !{0:s}!".format(x) ) if ins_file is None: ins_file = hydmod_outfile + ".ins" with open(ins_file, "w") as f_ins: f_ins.write("pif ~\nl1\n") f_ins.write( hydmod_df.loc[:, ["ins_line"]].to_string( col_space=0, columns=["ins_line"], header=False, index=False, formatters=[SFMT], ) + "\n" ) hydmod_df.loc[:, "weight"] = 1.0 hydmod_df.loc[:, "obgnme"] = "obgnme" df = try_process_output_file(hydmod_outfile + ".ins") if df is not None: df.loc[:, "obsnme"] = df.index.values df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: x[:-9]) df.to_csv("_setup_" + os.path.split(hydmod_outfile)[-1] + ".csv", index=False) return df return hydmod_df def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None): """read a binary hydmod file and return a dataframe of the results Args: hydmod_file (`str`): The path and name of the existing modflow hydmod binary file hydmod_outfile (`str`, optional): output file to write. If `None`, use `hydmod_file` +".dat". Default is `None`. Returns: **pandas.DataFrame**: a dataFrame with hydmod_file values """ try: import flopy.utils as fu except Exception as e: print("flopy is not installed - cannot read {0}\n{1}".format(hydmod_file, e)) return obs = fu.HydmodObs(hydmod_file) hyd_df = obs.get_dataframe() hyd_df.columns = [i[2:] if i.lower() != "totim" else i for i in hyd_df.columns] # hyd_df.loc[:,"datetime"] = hyd_df.index hyd_df["totim"] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d")) hyd_df.rename(columns={"totim": "datestamp"}, inplace=True) # reshape into a single column hyd_df = pd.melt(hyd_df, id_vars="datestamp") hyd_df.rename(columns={"value": "obsval"}, inplace=True) hyd_df["obsnme"] = [ i.lower() + "_" + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp) ] vc = hyd_df.obsnme.value_counts().sort_values() vc = list(vc.loc[vc > 1].index.values) if len(vc) > 0: hyd_df.to_csv("hyd_df.duplicates.csv") obs.get_dataframe().to_csv("hyd_org.duplicates.csv") raise Exception("duplicates in obsnme:{0}".format(vc)) # assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme" if not hydmod_outfile: hydmod_outfile = hydmod_file + ".dat" hyd_df.to_csv(hydmod_outfile, columns=["obsnme", "obsval"], sep=" ", index=False) # hyd_df = hyd_df[['obsnme','obsval']] return hyd_df[["obsnme", "obsval"]], hydmod_outfile def setup_mtlist_budget_obs( list_filename, gw_filename="mtlist_gw.dat", sw_filename="mtlist_sw.dat", start_datetime="1-1-1970", gw_prefix="gw", sw_prefix="sw", save_setup_file=False, ): """setup observations of gw (and optionally sw) mass budgets from mt3dusgs list file. Args: list_filename (`str`): path and name of an existing MT3D-USGS list file gw_filename (`str`, optional): output filename that will contain the gw budget observations. Default is "mtlist_gw.dat" sw_filename (`str`, optional): output filename that will contain the sw budget observations. Default is "mtlist_sw.dat" start_datetime (`str`, optional): an str that can be parsed into a `pandas.TimeStamp`. Used to give budget observations meaningful names. Default is "1-1-1970". gw_prefix (`str`, optional): a prefix to add to the GW budget observations. Useful if processing more than one list file as part of the forward run process. Default is 'gw'. sw_prefix (`str`, optional): a prefix to add to the SW budget observations.
Useful if processing more than one list file as part of the forward run process. Default is 'sw'. save_setup_file (`bool`, optional): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful control file information. Default is `False`. Returns: tuple containing - **str**: the command to add to the forward run script - **str**: the names of the instruction files that were created - **pandas.DataFrame**: a dataframe with information for constructing a control file Note: writes an instruction file and also a _setup_.csv to use when constructing a pest control file The instruction files are named `out_filename` +".ins" It is recommended to use the default value for `gw_filename` or `sw_filename`. This is the companion function of `gw_utils.apply_mtlist_budget_obs()`. """ gw, sw = apply_mtlist_budget_obs( list_filename, gw_filename, sw_filename, start_datetime ) gw_ins = gw_filename + ".ins" _write_mtlist_ins(gw_ins, gw, gw_prefix) ins_files = [gw_ins] df_gw = try_process_output_file(gw_ins, gw_filename) if df_gw is None: raise Exception("error processing groundwater instruction file") if sw is not None: sw_ins = sw_filename + ".ins" _write_mtlist_ins(sw_ins, sw, sw_prefix) ins_files.append(sw_ins) df_sw = try_process_output_file(sw_ins, sw_filename) if df_sw is None: raise Exception("error processing surface water instruction file") df_gw = df_gw.append(df_sw) df_gw.loc[:, "obsnme"] = df_gw.index.values if save_setup_file: df_gw.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False) frun_line = "pyemu.gw_utils.apply_mtlist_budget_obs('{0}')".format(list_filename) return frun_line, ins_files, df_gw def _write_mtlist_ins(ins_filename, df, prefix): """write an instruction file for a MT3D-USGS list file""" try: dt_str = df.index.map(lambda x: x.strftime("%Y%m%d")) except: dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip()) with open(ins_filename, "w") as f: f.write("pif ~\nl1\n") for dt in dt_str: f.write("l1 ") for col in df.columns.str.translate( {ord(s): None for s in ["(", ")", "/", "="]} ): if prefix == "": obsnme = "{0}_{1}".format(col, dt) else: obsnme = "{0}_{1}_{2}".format(prefix, col, dt) f.write(" w !{0}!".format(obsnme)) f.write("\n") def apply_mtlist_budget_obs( list_filename, gw_filename="mtlist_gw.dat", sw_filename="mtlist_sw.dat", start_datetime="1-1-1970", ): """process an MT3D-USGS list file to extract mass budget entries. Args: list_filename (`str`): the path and name of an existing MT3D-USGS list file gw_filename (`str`, optional): the name of the output file with gw mass budget information. Default is "mtlist_gw.dat" sw_filename (`str`): the name of the output file with sw mass budget information. Default is "mtlist_sw.dat" start_datatime (`str`): an str that can be cast to a pandas.TimeStamp. Used to give observations a meaningful name Returns: 2-element tuple containing - **pandas.DataFrame**: the gw mass budget dataframe - **pandas.DataFrame**: (optional) the sw mass budget dataframe. If the SFT process is not active, this returned value is `None`. Note: This is the companion function of `gw_utils.setup_mtlist_budget_obs()`. 
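        Example:
            A hedged sketch of the run-time call this function is written for
            (the list-file name "mt3d.list" below is hypothetical)::

                import pyemu
                gw_df, sw_df = pyemu.gw_utils.apply_mtlist_budget_obs("mt3d.list")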
""" try: import flopy except Exception as e: raise Exception("error import flopy: {0}".format(str(e))) mt = flopy.utils.MtListBudget(list_filename) gw, sw = mt.parse(start_datetime=start_datetime, diff=True) gw = gw.drop( [ col for col in gw.columns for drop_col in ["kper", "kstp", "tkstp"] if (col.lower().startswith(drop_col)) ], axis=1, ) gw.to_csv(gw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d") if sw is not None: sw = sw.drop( [ col for col in sw.columns for drop_col in ["kper", "kstp", "tkstp"] if (col.lower().startswith(drop_col)) ], axis=1, ) sw.to_csv(sw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d") return gw, sw def setup_mflist_budget_obs( list_filename, flx_filename="flux.dat", vol_filename="vol.dat", start_datetime="1-1'1970", prefix="", save_setup_file=False, specify_times=None, ): """setup observations of budget volume and flux from modflow list file. Args: list_filename (`str`): path and name of the existing modflow list file flx_filename (`str`, optional): output filename that will contain the budget flux observations. Default is "flux.dat" vol_filename (`str`, optional): output filename that will contain the budget volume observations. Default is "vol.dat" start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp. This is used to give budget observations meaningful names. Default is "1-1-1970". prefix (`str`, optional): a prefix to add to the water budget observations. Useful if processing more than one list file as part of the forward run process. Default is ''. save_setup_file (`bool`): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful control file information specify_times (`np.ndarray`-like, optional): An array of times to extract from the budget dataframes returned by the flopy MfListBudget(list_filename).get_dataframe() method. This can be useful to ensure consistent observation times for PEST. Array needs to be alignable with index of dataframe return by flopy method, care should be take to ensure that this is the case. If passed will be written to "budget_times.config" file as strings to be read by the companion `apply_mflist_budget_obs()` method at run time. Returns: **pandas.DataFrame**: a dataframe with information for constructing a control file. Note: This method writes instruction files and also a _setup_.csv to use when constructing a pest control file. The instruction files are named <flux_file>.ins and <vol_file>.ins, respectively It is recommended to use the default values for flux_file and vol_file. This is the companion function of `gw_utils.apply_mflist_budget_obs()`. 
""" flx, vol = apply_mflist_budget_obs( list_filename, flx_filename, vol_filename, start_datetime, times=specify_times ) _write_mflist_ins(flx_filename + ".ins", flx, prefix + "flx") _write_mflist_ins(vol_filename + ".ins", vol, prefix + "vol") df = try_process_output_file(flx_filename + ".ins") if df is None: raise Exception("error processing flux instruction file") df2 = try_process_output_file(vol_filename + ".ins") if df2 is None: raise Exception("error processing volume instruction file") df = df.append(df2) df.loc[:, "obsnme"] = df.index.values if save_setup_file: df.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False) if specify_times is not None: np.savetxt( os.path.join(os.path.dirname(flx_filename), "budget_times.config"), specify_times, fmt="%s", ) return df def apply_mflist_budget_obs( list_filename, flx_filename="flux.dat", vol_filename="vol.dat", start_datetime="1-1-1970", times=None, ): """process a MODFLOW list file to extract flux and volume water budget entries. Args: list_filename (`str`): path and name of the existing modflow list file flx_filename (`str`, optional): output filename that will contain the budget flux observations. Default is "flux.dat" vol_filename (`str`, optional): output filename that will contain the budget volume observations. Default is "vol.dat" start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp. This is used to give budget observations meaningful names. Default is "1-1-1970". times (`np.ndarray`-like or `str`, optional): An array of times to extract from the budget dataframes returned by the flopy MfListBudget(list_filename).get_dataframe() method. This can be useful to ensure consistent observation times for PEST. If type `str`, will assume `times=filename` and attempt to read single vector (no header or index) from file, parsing datetime using pandas. Array needs to be alignable with index of dataframe return by flopy method, care should be take to ensure that this is the case. If setup with `setup_mflist_budget_obs()` specifying `specify_times` argument `times` should be set to "budget_times.config". Note: This is the companion function of `gw_utils.setup_mflist_budget_obs()`. 
Returns: tuple containing - **pandas.DataFrame**: a dataframe with flux budget information - **pandas.DataFrame**: a dataframe with cumulative budget information """ try: import flopy except Exception as e: raise Exception("error import flopy: {0}".format(str(e))) mlf = flopy.utils.MfListBudget(list_filename) flx, vol = mlf.get_dataframes(start_datetime=start_datetime, diff=True) if times is not None: if isinstance(times, str): if vol.index.tzinfo: parse_date = {"t": [0]} names = [None] else: parse_date = False names = ["t"] times = pd.read_csv( times, header=None, names=names, parse_dates=parse_date )["t"].values flx = flx.loc[times] vol = vol.loc[times] flx.to_csv(flx_filename, sep=" ", index_label="datetime", date_format="%Y%m%d") vol.to_csv(vol_filename, sep=" ", index_label="datetime", date_format="%Y%m%d") return flx, vol def _write_mflist_ins(ins_filename, df, prefix): """write an instruction file for a MODFLOW list file""" dt_str = df.index.map(lambda x: x.strftime("%Y%m%d")) with open(ins_filename, "w") as f: f.write("pif ~\nl1\n") for dt in dt_str: f.write("l1 ") for col in df.columns: obsnme = "{0}_{1}_{2}".format(prefix, col, dt) f.write(" w !{0}!".format(obsnme)) f.write("\n") def setup_hds_timeseries( bin_file, kij_dict, prefix=None, include_path=False, model=None, postprocess_inact=None, text=None, fill=None, precision="single", ): """a function to setup a forward process to extract time-series style values from a binary modflow binary file (or equivalent format - hds, ucn, sub, cbb, etc). Args: bin_file (`str`): path and name of existing modflow binary file - headsave, cell budget and MT3D UCN supported. kij_dict (`dict`): dictionary of site_name: [k,i,j] pairs. For example: `{"wel1":[0,1,1]}`. prefix (`str`, optional): string to prepend to site_name when forming observation names. Default is None include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file is located (if different from where python is running). This is useful for setting up the process in separate directory for where python is running. model (`flopy.mbase`, optional): a `flopy.basemodel` instance. If passed, the observation names will have the datetime of the observation appended to them (using the flopy `start_datetime` attribute. If None, the observation names will have the zero-based stress period appended to them. Default is None. postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no inactive value processing happens. Default is `None`. text (`str`): the text record entry in the binary file (e.g. "constant_head"). Used to indicate that the binary file is a MODFLOW cell-by-cell budget file. If None, headsave or MT3D unformatted concentration file is assummed. Default is None fill (`float`): fill value for NaNs in the extracted timeseries dataframe. If `None`, no filling is done, which may yield model run failures as the resulting processed timeseries CSV file (produced at runtime) may have missing values and can't be processed with the cooresponding instruction file. Default is `None`. precision (`str`): the precision of the binary file. Can be "single" or "double". Default is "single". Returns: tuple containing - **str**: the forward run command to execute the binary file process during model runs. 
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file Note: This function writes hds_timeseries.config that must be in the same dir where `apply_hds_timeseries()` is called during the forward run Assumes model time units are days This is the companion function of `gw_utils.apply_hds_timeseries()`. """ try: import flopy except Exception as e: print("error importing flopy, returning {0}".format(str(e))) return assert os.path.exists(bin_file), "binary file not found" iscbc = False if text is not None: text = text.upper() try: # hack: if model is passed and its None, it trips up CellBudgetFile... if model is not None: bf = flopy.utils.CellBudgetFile( bin_file, precision=precision, model=model ) iscbc = True else: bf = flopy.utils.CellBudgetFile(bin_file, precision=precision) iscbc = True except Exception as e: try: if model is not None: bf = flopy.utils.HeadFile( bin_file, precision=precision, model=model, text=text ) else: bf = flopy.utils.HeadFile(bin_file, precision=precision, text=text) except Exception as e1: raise Exception( "error instantiating binary file as either CellBudgetFile:{0} or as HeadFile with text arg: {1}".format( str(e), str(e1) ) ) if iscbc: tl = [t.decode().strip() for t in bf.textlist] if text not in tl: raise Exception( "'text' {0} not found in CellBudgetFile.textlist:{1}".format( text, tl ) ) elif bin_file.lower().endswith(".ucn"): try: bf = flopy.utils.UcnFile(bin_file, precision=precision) except Exception as e: raise Exception("error instantiating UcnFile:{0}".format(str(e))) else: try: bf = flopy.utils.HeadFile(bin_file, precision=precision) except Exception as e: raise Exception("error instantiating HeadFile:{0}".format(str(e))) if text is None: text = "none" nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol # if include_path: # pth = os.path.join(*[p for p in os.path.split(hds_file)[:-1]]) # config_file = os.path.join(pth,"{0}_timeseries.config".format(hds_file)) # else: config_file = "{0}_timeseries.config".format(bin_file) print("writing config file to {0}".format(config_file)) if fill is None: fill = "none" f_config = open(config_file, "w") if model is not None: if model.dis.itmuni != 4: warnings.warn( "setup_hds_timeseries only supports 'days' time units...", PyemuWarning ) f_config.write( "{0},{1},d,{2},{3},{4},{5}\n".format( os.path.split(bin_file)[-1], model.start_datetime, text, fill, precision, iscbc, ) ) start = pd.to_datetime(model.start_datetime) else: f_config.write( "{0},none,none,{1},{2},{3},{4}\n".format( os.path.split(bin_file)[-1], text, fill, precision, iscbc ) ) f_config.write("site,k,i,j\n") dfs = [] for site, (k, i, j) in kij_dict.items(): assert k >= 0 and k < nlay, k assert i >= 0 and i < nrow, i assert j >= 0 and j < ncol, j site = site.lower().replace(" ", "") if iscbc: ts = bf.get_ts((k, i, j), text=text) # print(ts) df = pd.DataFrame(data=ts, columns=["totim", site]) else: df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site]) if model is not None: dts = start + pd.to_timedelta(df.totim, unit="d") df.loc[:, "totim"] = dts # print(df) f_config.write("{0},{1},{2},{3}\n".format(site, k, i, j)) df.index = df.pop("totim") dfs.append(df) f_config.close() df = pd.concat(dfs, axis=1).T df.to_csv(bin_file + "_timeseries.processed", sep=" ") if model is not None: t_str = df.columns.map(lambda x: x.strftime("%Y%m%d")) else: t_str = df.columns.map(lambda x: "{0:08.2f}".format(x)) ins_file = bin_file + "_timeseries.processed.ins" print("writing instruction file to {0}".format(ins_file)) with 
open(ins_file, "w") as f: f.write("pif ~\n") f.write("l1 \n") for site in df.index: # for t in t_str: f.write("l1 w ") # for site in df.columns: for t in t_str: if prefix is not None: obsnme = "{0}_{1}_{2}".format(prefix, site, t) else: obsnme = "{0}_{1}".format(site, t) f.write(" !{0}!".format(obsnme)) f.write("\n") if postprocess_inact is not None: _setup_postprocess_hds_timeseries( bin_file, df, config_file, prefix=prefix, model=model ) bd = "." if include_path: bd = os.getcwd() pth = os.path.join(*[p for p in os.path.split(bin_file)[:-1]]) os.chdir(pth) config_file = os.path.split(config_file)[-1] try: df = apply_hds_timeseries(config_file, postprocess_inact=postprocess_inact) except Exception as e: os.chdir(bd) raise Exception("error in apply_hds_timeseries(): {0}".format(str(e))) os.chdir(bd) df = try_process_output_file(ins_file) if df is None: raise Exception("error processing {0} instruction file".format(ins_file)) df.loc[:, "weight"] = 0.0 if prefix is not None: df.loc[:, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:2])) else: df.loc[:, "obgnme"] = df.index.map(lambda x: x.split("_")[0]) frun_line = "pyemu.gw_utils.apply_hds_timeseries('{0}',{1})\n".format( config_file, postprocess_inact ) return frun_line, df def apply_hds_timeseries(config_file=None, postprocess_inact=None): """process a modflow binary file using a previously written configuration file Args: config_file (`str`, optional): configuration file written by `pyemu.gw_utils.setup_hds_timeseries`. If `None`, looks for `hds_timeseries.config` postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no inactive value processing happens. Default is `None`. Note: This is the companion function of `gw_utils.setup_hds_timeseries()`. 
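        Example:
            A hedged sketch of the run-time call, assuming the config file written
            by `setup_hds_timeseries()` sits in the current working directory (the
            file name below is hypothetical)::

                import pyemu
                df = pyemu.gw_utils.apply_hds_timeseries("my_model.hds_timeseries.config")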
""" import flopy if config_file is None: config_file = "hds_timeseries.config" assert os.path.exists(config_file), config_file with open(config_file, "r") as f: line = f.readline() ( bf_file, start_datetime, time_units, text, fill, precision, _iscbc, ) = line.strip().split(",") if len(line.strip().split(",")) == 6: ( bf_file, start_datetime, time_units, text, fill, precision, ) = line.strip().split(",") _iscbc = "false" else: ( bf_file, start_datetime, time_units, text, fill, precision, _iscbc, ) = line.strip().split(",") site_df = pd.read_csv(f) text = text.upper() if _iscbc.lower().strip() == "false": iscbc = False elif _iscbc.lower().strip() == "true": iscbc = True else: raise Exception( "apply_hds_timeseries() error: unrecognized 'iscbc' string in config file: {0}".format( _iscbc ) ) assert os.path.exists(bf_file), "head save file not found" if iscbc: try: bf = flopy.utils.CellBudgetFile(bf_file, precision=precision) except Exception as e: raise Exception("error instantiating CellBudgetFile:{0}".format(str(e))) elif bf_file.lower().endswith(".ucn"): try: bf = flopy.utils.UcnFile(bf_file, precision=precision) except Exception as e: raise Exception("error instantiating UcnFile:{0}".format(str(e))) else: try: if text != "NONE": bf = flopy.utils.HeadFile(bf_file, text=text, precision=precision) else: bf = flopy.utils.HeadFile(bf_file, precision=precision) except Exception as e: raise Exception("error instantiating HeadFile:{0}".format(str(e))) nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol dfs = [] for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j): assert k >= 0 and k < nlay assert i >= 0 and i < nrow assert j >= 0 and j < ncol if iscbc: df = pd.DataFrame( data=bf.get_ts((k, i, j), text=text), columns=["totim", site] ) else: df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site]) df.index = df.pop("totim") dfs.append(df) df = pd.concat(dfs, axis=1).T if df.shape != df.dropna().shape: warnings.warn("NANs in processed timeseries file", PyemuWarning) if fill.upper() != "NONE": fill = float(fill) df.fillna(fill, inplace=True) # print(df) df.to_csv(bf_file + "_timeseries.processed", sep=" ") if postprocess_inact is not None: _apply_postprocess_hds_timeseries(config_file, postprocess_inact) return df def _setup_postprocess_hds_timeseries( hds_file, df, config_file, prefix=None, model=None ): """Dirty function to setup post processing concentrations in inactive/dry cells""" warnings.warn( "Setting up post processing of hds or ucn timeseries obs. 
" "Prepending 'pp' to obs name may cause length to exceed 20 chars", PyemuWarning, ) if model is not None: t_str = df.columns.map(lambda x: x.strftime("%Y%m%d")) else: t_str = df.columns.map(lambda x: "{0:08.2f}".format(x)) if prefix is not None: prefix = "pp{0}".format(prefix) else: prefix = "pp" ins_file = hds_file + "_timeseries.post_processed.ins" print("writing instruction file to {0}".format(ins_file)) with open(ins_file, "w") as f: f.write("pif ~\n") f.write("l1 \n") for site in df.index: f.write("l1 w ") # for site in df.columns: for t in t_str: obsnme = "{0}{1}_{2}".format(prefix, site, t) f.write(" !{0}!".format(obsnme)) f.write("\n") frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format( config_file ) return frun_line def _apply_postprocess_hds_timeseries(config_file=None, cinact=1e30): """private function to post processing binary files""" import flopy if config_file is None: config_file = "hds_timeseries.config" assert os.path.exists(config_file), config_file with open(config_file, "r") as f: line = f.readline() ( hds_file, start_datetime, time_units, text, fill, precision, _iscbc, ) = line.strip().split(",") if len(line.strip().split(",")) == 6: ( hds_file, start_datetime, time_units, text, fill, precision, ) = line.strip().split(",") _iscbc = "false" else: ( hds_file, start_datetime, time_units, text, fill, precision, _iscbc, ) = line.strip().split(",") site_df = pd.read_csv(f) # print(site_df) text = text.upper() assert os.path.exists(hds_file), "head save file not found" if hds_file.lower().endswith(".ucn"): try: hds = flopy.utils.UcnFile(hds_file, precision=precision) except Exception as e: raise Exception("error instantiating UcnFile:{0}".format(str(e))) else: try: if text != "NONE": hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision) else: hds = flopy.utils.HeadFile(hds_file, precision=precision) except Exception as e: raise Exception("error instantiating HeadFile:{0}".format(str(e))) nlay, nrow, ncol = hds.nlay, hds.nrow, hds.ncol dfs = [] for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j): assert k >= 0 and k < nlay assert i >= 0 and i < nrow assert j >= 0 and j < ncol if text.upper() != "NONE": df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site]) else: df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site]) df.index = df.pop("totim") inact_obs = df[site].apply(lambda x: np.isclose(x, cinact)) if inact_obs.sum() > 0: assert k + 1 < nlay, "Inactive observation in lowest layer" df_lower = pd.DataFrame( data=hds.get_ts((k + 1, i, j)), columns=["totim", site] ) df_lower.index = df_lower.pop("totim") df.loc[inact_obs] = df_lower.loc[inact_obs] print( "{0} observation(s) post-processed for site {1} at kij ({2},{3},{4})".format( inact_obs.sum(), site, k, i, j ) ) dfs.append(df) df = pd.concat(dfs, axis=1).T # print(df) df.to_csv(hds_file + "_timeseries.post_processed", sep=" ") return df def setup_hds_obs( hds_file, kperk_pairs=None, skip=None, prefix="hds", text="head", precision="single", include_path=False, ): """a function to setup using all values from a layer-stress period pair for observations. Args: hds_file (`str`): path and name of an existing MODFLOW head-save file. If the hds_file endswith 'ucn', then the file is treated as a UcnFile type. kperk_pairs ([(int,int)]): a list of len two tuples which are pairs of kper (zero-based stress period index) and k (zero-based layer index) to setup observations for. 
If None, then all layers and stress period records found in the file will be used. Caution: a shit-ton of observations may be produced! skip (variable, optional): a value or function used to determine which values to skip when setting up observations. If np.scalar(skip) is True, then values equal to skip will not be used. If skip can also be a np.ndarry with dimensions equal to the model. Observations are set up only for cells with Non-zero values in the array. If not np.ndarray or np.scalar(skip), then skip will be treated as a lambda function that returns np.NaN if the value should be skipped. prefix (`str`): the prefix to use for the observation names. default is "hds". text (`str`): the text tag the flopy HeadFile instance. Default is "head" precison (`str`): the precision string for the flopy HeadFile instance. Default is "single" include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file is located (if different from where python is running). This is useful for setting up the process in separate directory for where python is running. Returns: tuple containing - **str**: the forward run script line needed to execute the headsave file observation operation - **pandas.DataFrame**: a dataframe of pest control file information Note: Writes an instruction file and a _setup_ csv used construct a control file. This is the companion function to `gw_utils.apply_hds_obs()`. """ try: import flopy except Exception as e: print("error importing flopy, returning {0}".format(str(e))) return assert os.path.exists(hds_file), "head save file not found" if hds_file.lower().endswith(".ucn"): try: hds = flopy.utils.UcnFile(hds_file) except Exception as e: raise Exception("error instantiating UcnFile:{0}".format(str(e))) elif text.lower() == "headu": try: hds = flopy.utils.HeadUFile(hds_file, text=text, precision=precision) except Exception as e: raise Exception("error instantiating HeadFile:{0}".format(str(e))) else: try: hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision) except Exception as e: raise Exception("error instantiating HeadFile:{0}".format(str(e))) if kperk_pairs is None: kperk_pairs = [] for kstp, kper in hds.kstpkper: kperk_pairs.extend([(kper - 1, k) for k in range(hds.nlay)]) if len(kperk_pairs) == 2: try: if len(kperk_pairs[0]) == 2: pass except: kperk_pairs = [kperk_pairs] # if start_datetime is not None: # start_datetime = pd.to_datetime(start_datetime) # dts = start_datetime + pd.to_timedelta(hds.times,unit='d') data = {} kpers = [kper - 1 for kstp, kper in hds.kstpkper] for kperk_pair in kperk_pairs: kper, k = kperk_pair assert kper in kpers, "kper not in hds:{0}".format(kper) assert k in range(hds.nlay), "k not in hds:{0}".format(k) kstp = last_kstp_from_kper(hds, kper) d = hds.get_data(kstpkper=(kstp, kper))[k] data["{0}_{1}".format(kper, k)] = d.flatten() # data[(kper,k)] = d.flatten() idx, iidx, jidx = [], [], [] for _ in range(len(data)): for i in range(hds.nrow): iidx.extend([i for _ in range(hds.ncol)]) jidx.extend([j for j in range(hds.ncol)]) idx.extend(["i{0:04d}_j{1:04d}".format(i, j) for j in range(hds.ncol)]) idx = idx[: hds.nrow * hds.ncol] df = pd.DataFrame(data, index=idx) data_cols = list(df.columns) data_cols.sort() # df.loc[:,"iidx"] = iidx # df.loc[:,"jidx"] = jidx if skip is not None: for col in data_cols: if np.isscalar(skip): df.loc[df.loc[:, col] == skip, col] = np.NaN elif isinstance(skip, np.ndarray): assert ( skip.ndim >= 2 ), "skip passed as {}D array, At least 2D (<= 4D) array 
required".format( skip.ndim ) assert skip.shape[-2:] == ( hds.nrow, hds.ncol, ), "Array dimensions of arg. skip needs to match model dimensions ({0},{1}). ({2},{3}) passed".format( hds.nrow, hds.ncol, skip.shape[-2], skip.shape[-1] ) if skip.ndim == 2: print( "2D array passed for skip, assuming constant for all layers and kper" ) skip = np.tile(skip, (len(kpers), hds.nlay, 1, 1)) if skip.ndim == 3: print("3D array passed for skip, assuming constant for all kper") skip = np.tile(skip, (len(kpers), 1, 1, 1)) kper, k = [int(c) for c in col.split("_")] df.loc[ df.index.map( lambda x: skip[ kper, k, int(x.split("_")[0].strip("i")), int(x.split("_")[1].strip("j")), ] == 0 ), col, ] = np.NaN else: df.loc[:, col] = df.loc[:, col].apply(skip) # melt to long form df = df.melt(var_name="kperk", value_name="obsval") # set row and col identifies df.loc[:, "iidx"] = iidx df.loc[:, "jidx"] = jidx # drop nans from skip df = df.dropna() # set some additional identifiers df.loc[:, "kper"] = df.kperk.apply(lambda x: int(x.split("_")[0])) df.loc[:, "kidx"] = df.pop("kperk").apply(lambda x: int(x.split("_")[1])) # form obs names # def get_kper_str(kper): # if start_datetime is not None: # return dts[int(kper)].strftime("%Y%m%d") # else: # return "kper{0:04.0f}".format(kper) fmt = prefix + "_{0:02.0f}_{1:03.0f}_{2:03.0f}_{3:03.0f}" # df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx, # get_kper_str(x.kper)),axis=1) df.loc[:, "obsnme"] = df.apply( lambda x: fmt.format(x.kidx, x.iidx, x.jidx, x.kper), axis=1 ) df.loc[:, "ins_str"] = df.obsnme.apply(lambda x: "l1 w !{0}!".format(x)) df.loc[:, "obgnme"] = prefix # write the instruction file with open(hds_file + ".dat.ins", "w") as f: f.write("pif ~\nl1\n") df.ins_str.to_string(f, index=False, header=False) # write the corresponding output file df.loc[:, ["obsnme", "obsval"]].to_csv(hds_file + ".dat", sep=" ", index=False) hds_path = os.path.dirname(hds_file) setup_file = os.path.join( hds_path, "_setup_{0}.csv".format(os.path.split(hds_file)[-1]) ) df.to_csv(setup_file) if not include_path: hds_file = os.path.split(hds_file)[-1] fwd_run_line = ( "pyemu.gw_utils.apply_hds_obs('{0}',precision='{1}',text='{2}')\n".format( hds_file, precision, text ) ) df.index = df.obsnme return fwd_run_line, df def last_kstp_from_kper(hds, kper): """function to find the last time step (kstp) for a give stress period (kper) in a modflow head save file. Args: hds (`flopy.utils.HeadFile`): head save file kper (`int`): the zero-index stress period number Returns: **int**: the zero-based last time step during stress period kper in the head save file """ # find the last kstp with this kper kstp = -1 for kkstp, kkper in hds.kstpkper: if kkper == kper + 1 and kkstp > kstp: kstp = kkstp if kstp == -1: raise Exception("kstp not found for kper {0}".format(kper)) kstp -= 1 return kstp def apply_hds_obs(hds_file, inact_abs_val=1.0e20, precision="single", text="head"): """process a modflow head save file. A companion function to `gw_utils.setup_hds_obs()` that is called during the forward run process Args: hds_file (`str`): a modflow head save filename. if hds_file ends with 'ucn', then the file is treated as a UcnFile type. inact_abs_val (`float`, optional): the value that marks the mininum and maximum active value. values in the headsave file greater than `inact_abs_val` or less than -`inact_abs_val` are reset to `inact_abs_val` Returns: **pandas.DataFrame**: a dataframe with extracted simulated values. Note: This is the companion function to `gw_utils.setup_hds_obs()`. 
""" try: import flopy except Exception as e: raise Exception("apply_hds_obs(): error importing flopy: {0}".format(str(e))) from .. import pst_utils assert os.path.exists(hds_file) out_file = hds_file + ".dat" ins_file = out_file + ".ins" assert os.path.exists(ins_file) df = pd.DataFrame({"obsnme": pst_utils.parse_ins_file(ins_file)}) df.index = df.obsnme # populate metdata items = ["k", "i", "j", "kper"] for i, item in enumerate(items): df.loc[:, item] = df.obsnme.apply(lambda x: int(x.split("_")[i + 1])) if hds_file.lower().endswith("ucn"): hds = flopy.utils.UcnFile(hds_file) elif text.lower() == "headu": hds = flopy.utils.HeadUFile(hds_file) else: hds = flopy.utils.HeadFile(hds_file, precision=precision, text=text) kpers = df.kper.unique() df.loc[:, "obsval"] = np.NaN for kper in kpers: kstp = last_kstp_from_kper(hds, kper) data = hds.get_data(kstpkper=(kstp, kper)) # jwhite 15jan2018 fix for really large values that are getting some # trash added to them... if text.lower() != "headu": data[np.isnan(data)] = 0.0 data[data > np.abs(inact_abs_val)] = np.abs(inact_abs_val) data[data < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val) df_kper = df.loc[df.kper == kper, :] df.loc[df_kper.index, "obsval"] = data[df_kper.k, df_kper.i, df_kper.j] else: df_kper = df.loc[df.kper == kper, :] for k, d in enumerate(data): d[np.isnan(d)] = 0.0 d[d > np.abs(inact_abs_val)] = np.abs(inact_abs_val) d[d < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val) df_kperk = df_kper.loc[df_kper.k == k, :] df.loc[df_kperk.index, "obsval"] = d[df_kperk.i] assert df.dropna().shape[0] == df.shape[0] df.loc[:, ["obsnme", "obsval"]].to_csv(out_file, index=False, sep=" ") return df def setup_sft_obs(sft_file, ins_file=None, start_datetime=None, times=None, ncomp=1): """writes a post-processor and instruction file for a mt3d-usgs sft output file Args: sft_file (`str`): path and name of an existing sft output file (ASCII) ins_file (`str`, optional): the name of the instruction file to create. If None, the name is `sft_file`+".ins". Default is `None`. start_datetime (`str`): a pandas.to_datetime() compatible str. If not None, then the resulting observation names have the datetime suffix. If None, the suffix is the output totim. Default is `None`. times ([`float`]): a list of times to make observations for. If None, all times found in the file are used. Default is None. ncomp (`int`): number of components in transport model. Default is 1. Returns: **pandas.DataFrame**: a dataframe with observation names and values for the sft simulated concentrations. Note: This is the companion function to `gw_utils.apply_sft_obs()`. 
""" df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True) df.columns = [c.lower().replace("-", "_") for c in df.columns] if times is None: times = df.time.unique() missing = [] utimes = df.time.unique() for t in times: if t not in utimes: missing.append(str(t)) if len(missing) > 0: print(df.time) raise Exception("the following times are missing:{0}".format(",".join(missing))) with open("sft_obs.config", "w") as f: f.write(sft_file + "\n") [f.write("{0:15.6E}\n".format(t)) for t in times] df = apply_sft_obs() utimes = df.time.unique() for t in times: assert t in utimes, "time {0} missing in processed dataframe".format(t) idx = df.time.apply(lambda x: x in times) if start_datetime is not None: start_datetime = pd.to_datetime(start_datetime) df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime df.loc[:, "time_str"] = df.time_str.apply( lambda x: datetime.strftime(x, "%Y%m%d") ) else: df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x)) df.loc[:, "ins_str"] = "l1\n" # check for multiple components df_times = df.loc[idx, :] df.loc[:, "icomp"] = 1 icomp_idx = list(df.columns).index("icomp") for t in times: df_time = df.loc[df.time == t, :].copy() vc = df_time.sfr_node.value_counts() ncomp = vc.max() assert np.all(vc.values == ncomp) nstrm = df_time.shape[0] / ncomp for icomp in range(ncomp): s = int(nstrm * (icomp)) e = int(nstrm * (icomp + 1)) idxs = df_time.iloc[s:e, :].index # df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1) df_time.loc[idxs, "icomp"] = int(icomp + 1) # df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\ # format(x.sfr_node,x.icomp,x.time_str),axis=1) df.loc[df_time.index, "ins_str"] = df_time.apply( lambda x: "l1 w w !sfrc{0}_{1}_{2}!\n".format( x.sfr_node, x.icomp, x.time_str ), axis=1, ) df.index = np.arange(df.shape[0]) if ins_file is None: ins_file = sft_file + ".processed.ins" with open(ins_file, "w") as f: f.write("pif ~\nl1\n") [f.write(i) for i in df.ins_str] # df = try_process_ins_file(ins_file,sft_file+".processed") df = try_process_output_file(ins_file, sft_file + ".processed") return df def apply_sft_obs(): """process an mt3d-usgs sft ASCII output file using a previous-written config file Returns: **pandas.DataFrame**: a dataframe of extracted simulated outputs Note: This is the companion function to `gw_utils.setup_sft_obs()`. """ # this is for dealing with the missing 'e' problem def try_cast(x): try: return float(x) except: return 0.0 times = [] with open("sft_obs.config") as f: sft_file = f.readline().strip() for line in f: times.append(float(line.strip())) df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True) # ,nrows=10000000) df.columns = [c.lower().replace("-", "_") for c in df.columns] df = df.loc[df.time.apply(lambda x: x in times), :] # print(df.dtypes) # normalize for c in df.columns: # print(c) if not "node" in c: df.loc[:, c] = df.loc[:, c].apply(try_cast) # print(df.loc[df.loc[:,c].apply(lambda x : type(x) == str),:]) if df.dtypes[c] == float: df.loc[df.loc[:, c] < 1e-30, c] = 0.0 df.loc[df.loc[:, c] > 1e30, c] = 1.0e30 df.loc[:, "sfr_node"] = df.sfr_node.apply(np.int) df.to_csv(sft_file + ".processed", sep=" ", index=False) return df def setup_sfr_seg_parameters( nam_file, model_ws=".", par_cols=None, tie_hcond=True, include_temporal_pars=None ): """Setup multiplier parameters for SFR segment data. Args: nam_file (`str`): MODFLOw name file. 
DIS, BAS, and SFR must be available as pathed in the nam_file. Optionally, `nam_file` can be an existing `flopy.modflow.Modflow`. model_ws (`str`): model workspace for flopy to load the MODFLOW model from par_cols ([`str`]): a list of segment data entires to parameterize tie_hcond (`bool`): flag to use same mult par for hcond1 and hcond2 for a given segment. Default is `True`. include_temporal_pars ([`str`]): list of spatially-global multipliers to set up for each stress period. Default is None Returns: **pandas.DataFrame**: a dataframe with useful parameter setup information Note: This function handles the standard input case, not all the cryptic SFR options. Loads the dis, bas, and sfr files with flopy using model_ws. This is the companion function to `gw_utils.apply_sfr_seg_parameters()` . The number (and numbering) of segment data entries must consistent across all stress periods. Writes `nam_file` +"_backup_.sfr" as the backup of the original sfr file Skips values = 0.0 since multipliers don't work for these """ try: import flopy except Exception as e: return if par_cols is None: par_cols = ["flow", "runoff", "hcond1", "pptsw"] if tie_hcond: if "hcond1" not in par_cols or "hcond2" not in par_cols: tie_hcond = False if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None: m = nam_file nam_file = m.namefile model_ws = m.model_ws else: # load MODFLOW model # is this needed? could we just pass the model if it has already been read in? m = flopy.modflow.Modflow.load( nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False ) if include_temporal_pars: if include_temporal_pars is True: tmp_par_cols = {col: range(m.dis.nper) for col in par_cols} elif isinstance(include_temporal_pars, str): tmp_par_cols = {include_temporal_pars: range(m.dis.nper)} elif isinstance(include_temporal_pars, list): tmp_par_cols = {col: range(m.dis.nper) for col in include_temporal_pars} elif isinstance(include_temporal_pars, dict): tmp_par_cols = include_temporal_pars include_temporal_pars = True else: tmp_par_cols = {} include_temporal_pars = False # make backup copy of sfr file shutil.copy( os.path.join(model_ws, m.sfr.file_name[0]), os.path.join(model_ws, nam_file + "_backup_.sfr"), ) # get the segment data (dict) segment_data = m.sfr.segment_data shape = segment_data[list(segment_data.keys())[0]].shape # check for kper, seg_data in m.sfr.segment_data.items(): assert ( seg_data.shape == shape ), "cannot use: seg data must have the same number of entires for all kpers" seg_data_col_order = list(seg_data.dtype.names) # convert segment_data dictionary to multi index df - this could get ugly reform = { (k, c): segment_data[k][c] for k in segment_data.keys() for c in segment_data[k].dtype.names } seg_data_all_kper = pd.DataFrame.from_dict(reform) seg_data_all_kper.columns.names = ["kper", "col"] # extract the first seg data kper to a dataframe seg_data = seg_data_all_kper[0].copy() # pd.DataFrame.from_records(seg_data) # make sure all par cols are found and search of any data in kpers missing = [] cols = par_cols.copy() for par_col in set(par_cols + list(tmp_par_cols.keys())): if par_col not in seg_data.columns: if par_col in cols: missing.append(cols.pop(cols.index(par_col))) if par_col in tmp_par_cols.keys(): _ = tmp_par_cols.pop(par_col) # look across all kper in multiindex df to check for values entry - fill with absmax should capture entries else: seg_data.loc[:, par_col] = ( seg_data_all_kper.loc[:, (slice(None), par_col)] .abs() .max(level=1, axis=1) ) if len(missing) > 
0: warnings.warn( "the following par_cols were not found in segment data: {0}".format( ",".join(missing) ), PyemuWarning, ) if len(missing) >= len(par_cols): warnings.warn( "None of the passed par_cols ({0}) were found in segment data.".format( ",".join(par_cols) ), PyemuWarning, ) seg_data = seg_data[seg_data_col_order] # reset column orders to inital seg_data_org = seg_data.copy() seg_data.to_csv(os.path.join(model_ws, "sfr_seg_pars.dat"), sep=",") # the data cols not to parameterize # better than a column indexer as pandas can change column orders idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"] notpar_cols = [c for c in seg_data.columns if c not in cols + idx_cols] # process par cols tpl_str, pvals = [], [] if include_temporal_pars: tmp_pnames, tmp_tpl_str = [], [] tmp_df = pd.DataFrame( data={c: 1.0 for c in tmp_par_cols.keys()}, index=list(m.sfr.segment_data.keys()), ) tmp_df.sort_index(inplace=True) tmp_df.to_csv(os.path.join(model_ws, "sfr_seg_temporal_pars.dat")) for par_col in set(cols + list(tmp_par_cols.keys())): print(par_col) prefix = par_col if tie_hcond and par_col == "hcond2": prefix = "hcond1" if seg_data.loc[:, par_col].sum() == 0.0: print("all zeros for {0}...skipping...".format(par_col)) # seg_data.loc[:,par_col] = 1 # all zero so no need to set up if par_col in cols: # - add to notpar notpar_cols.append(cols.pop(cols.index(par_col))) if par_col in tmp_par_cols.keys(): _ = tmp_par_cols.pop(par_col) if par_col in cols: seg_data.loc[:, par_col] = seg_data.apply( lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.nseg)) if float(x[par_col]) != 0.0 else "1.0", axis=1, ) org_vals = seg_data_org.loc[seg_data_org.loc[:, par_col] != 0.0, par_col] pnames = seg_data.loc[org_vals.index, par_col] pvals.extend(list(org_vals.values)) tpl_str.extend(list(pnames.values)) if par_col in tmp_par_cols.keys(): parnme = tmp_df.index.map( lambda x: "{0}_{1:04d}_tmp".format(par_col, int(x)) if x in tmp_par_cols[par_col] else 1.0 ) sel = parnme != 1.0 tmp_df.loc[sel, par_col] = parnme[sel].map(lambda x: "~ {0} ~".format(x)) tmp_tpl_str.extend(list(tmp_df.loc[sel, par_col].values)) tmp_pnames.extend(list(parnme[sel].values)) pnames = [t.replace("~", "").strip() for t in tpl_str] df = pd.DataFrame( {"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames ) df.drop_duplicates(inplace=True) if df.empty: warnings.warn( "No spatial sfr segment parameters have been set up, " "either none of {0} were found or all were zero.".format( ",".join(par_cols) ), PyemuWarning, ) # return df # set not par cols to 1.0 seg_data.loc[:, notpar_cols] = "1.0" # write the template file _write_df_tpl(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"), seg_data, sep=",") # make sure the tpl file exists and has the same num of pars parnme = parse_tpl_file(os.path.join(model_ws, "sfr_seg_pars.dat.tpl")) assert len(parnme) == df.shape[0] # set some useful par info df["pargp"] = df.parnme.apply(lambda x: x.split("_")[0]) if include_temporal_pars: _write_df_tpl( filename=os.path.join(model_ws, "sfr_seg_temporal_pars.dat.tpl"), df=tmp_df ) pargp = [pname.split("_")[0] + "_tmp" for pname in tmp_pnames] tmp_df = pd.DataFrame( data={"parnme": tmp_pnames, "pargp": pargp}, index=tmp_pnames ) if not tmp_df.empty: tmp_df.loc[:, "org_value"] = 1.0 tmp_df.loc[:, "tpl_str"] = tmp_tpl_str df = df.append(tmp_df[df.columns]) if df.empty: warnings.warn( "No sfr segment parameters have been set up, " "either none of {0} were found or all were zero.".format( ",".join(set(par_cols + list(tmp_par_cols.keys()))) 
), PyemuWarning, ) return df # write the config file used by apply_sfr_pars() with open(os.path.join(model_ws, "sfr_seg_pars.config"), "w") as f: f.write("nam_file {0}\n".format(nam_file)) f.write("model_ws {0}\n".format(model_ws)) f.write("mult_file sfr_seg_pars.dat\n") f.write("sfr_filename {0}\n".format(m.sfr.file_name[0])) if include_temporal_pars: f.write("time_mult_file sfr_seg_temporal_pars.dat\n") # set some useful par info df.loc[:, "parubnd"] = 1.25 df.loc[:, "parlbnd"] = 0.75 hpars = df.loc[df.pargp.apply(lambda x: x.startswith("hcond")), "parnme"] df.loc[hpars, "parubnd"] = 100.0 df.loc[hpars, "parlbnd"] = 0.01 return df def setup_sfr_reach_parameters(nam_file, model_ws=".", par_cols=["strhc1"]): """Setup multiplier paramters for reach data, when reachinput option is specififed in sfr. Args: nam_file (`str`): MODFLOw name file. DIS, BAS, and SFR must be available as pathed in the nam_file. Optionally, `nam_file` can be an existing `flopy.modflow.Modflow`. model_ws (`str`): model workspace for flopy to load the MODFLOW model from par_cols ([`str`]): a list of segment data entires to parameterize tie_hcond (`bool`): flag to use same mult par for hcond1 and hcond2 for a given segment. Default is `True`. include_temporal_pars ([`str`]): list of spatially-global multipliers to set up for each stress period. Default is None Returns: **pandas.DataFrame**: a dataframe with useful parameter setup information Note: Similar to `gw_utils.setup_sfr_seg_parameters()`, method will apply params to sfr reachdata Can load the dis, bas, and sfr files with flopy using model_ws. Or can pass a model object (SFR loading can be slow) This is the companion function of `gw_utils.apply_sfr_reach_parameters()` Skips values = 0.0 since multipliers don't work for these """ try: import flopy except Exception as e: return if par_cols is None: par_cols = ["strhc1"] if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None: # flopy MODFLOW model has been passed and has SFR loaded m = nam_file nam_file = m.namefile model_ws = m.model_ws else: # if model has not been passed or SFR not loaded # load MODFLOW model m = flopy.modflow.Modflow.load( nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False ) # get reachdata as dataframe reach_data = pd.DataFrame.from_records(m.sfr.reach_data) # write inital reach_data as csv reach_data_orig = reach_data.copy() reach_data.to_csv(os.path.join(m.model_ws, "sfr_reach_pars.dat"), sep=",") # generate template file with pars in par_cols # process par cols tpl_str, pvals = [], [] # par_cols=["strhc1"] idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"] # the data cols not to parameterize notpar_cols = [c for c in reach_data.columns if c not in par_cols + idx_cols] # make sure all par cols are found and search of any data in kpers missing = [] cols = par_cols.copy() for par_col in par_cols: if par_col not in reach_data.columns: missing.append(par_col) cols.remove(par_col) if len(missing) > 0: warnings.warn( "the following par_cols were not found in reach data: {0}".format( ",".join(missing) ), PyemuWarning, ) if len(missing) >= len(par_cols): warnings.warn( "None of the passed par_cols ({0}) were found in reach data.".format( ",".join(par_cols) ), PyemuWarning, ) for par_col in cols: if par_col == "strhc1": prefix = "strk" # shorten par else: prefix = par_col reach_data.loc[:, par_col] = reach_data.apply( lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.reachID)) if float(x[par_col]) != 0.0 else "1.0", axis=1, ) 
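        # the apply() above swaps each non-zero reach value for a PEST template
        # marker built from the short prefix and the zero-padded reachID
        # (e.g. "~ strk_0012 ~" when par_col is "strhc1"); zero-valued entries are
        # written as the literal "1.0" so they are not parameterized, consistent
        # with the "Skips values = 0.0" note in the docstring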
org_vals = reach_data_orig.loc[reach_data_orig.loc[:, par_col] != 0.0, par_col] pnames = reach_data.loc[org_vals.index, par_col] pvals.extend(list(org_vals.values)) tpl_str.extend(list(pnames.values)) pnames = [t.replace("~", "").strip() for t in tpl_str] df = pd.DataFrame( {"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames ) df.drop_duplicates(inplace=True) if df.empty: warnings.warn( "No sfr reach parameters have been set up, either none of {0} were found or all were zero.".format( ",".join(par_cols) ), PyemuWarning, ) else: # set not par cols to 1.0 reach_data.loc[:, notpar_cols] = "1.0" # write the template file _write_df_tpl( os.path.join(model_ws, "sfr_reach_pars.dat.tpl"), reach_data, sep="," ) # write the config file used by apply_sfr_pars() with open(os.path.join(model_ws, "sfr_reach_pars.config"), "w") as f: f.write("nam_file {0}\n".format(nam_file)) f.write("model_ws {0}\n".format(model_ws)) f.write("mult_file sfr_reach_pars.dat\n") f.write("sfr_filename {0}".format(m.sfr.file_name[0])) # make sure the tpl file exists and has the same num of pars parnme = parse_tpl_file(os.path.join(model_ws, "sfr_reach_pars.dat.tpl")) assert len(parnme) == df.shape[0] # set some useful par info df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split("_")[0]) df.loc[:, "parubnd"] = 1.25 df.loc[:, "parlbnd"] = 0.75 hpars = df.loc[df.pargp.apply(lambda x: x.startswith("strk")), "parnme"] df.loc[hpars, "parubnd"] = 100.0 df.loc[hpars, "parlbnd"] = 0.01 return df def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False): """apply the SFR segement multiplier parameters. Args: seg_pars (`bool`, optional): flag to apply segment-based parameters. Default is True reach_pars (`bool`, optional): flag to apply reach-based parameters. Default is False Returns: **flopy.modflow.ModflowSfr**: the modified SFR package instance Note: Expects "sfr_seg_pars.config" to exist Expects `nam_file` +"_backup_.sfr" to exist """ if not seg_pars and not reach_pars: raise Exception( "gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False" ) # if seg_pars and reach_pars: # raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are True") import flopy bak_sfr_file, pars = None, None if seg_pars: assert os.path.exists("sfr_seg_pars.config") with open("sfr_seg_pars.config", "r") as f: pars = {} for line in f: line = line.strip().split() pars[line[0]] = line[1] bak_sfr_file = pars["nam_file"] + "_backup_.sfr" # m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False) m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False) sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m) sfrfile = pars["sfr_filename"] mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0) # time_mlt_df = None # if "time_mult_file" in pars: # time_mult_file = pars["time_mult_file"] # time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0) idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"] present_cols = [c for c in idx_cols if c in mlt_df.columns] mlt_cols = mlt_df.columns.drop(present_cols) for key, val in m.sfr.segment_data.items(): df = pd.DataFrame.from_records(val) df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols] val = df.to_records(index=False) sfr.segment_data[key] = val if reach_pars: assert os.path.exists("sfr_reach_pars.config") with open("sfr_reach_pars.config", "r") as f: r_pars = {} for line in f: line = line.strip().split() 
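                # each non-blank line of sfr_reach_pars.config is a whitespace-delimited
                # "key value" pair (e.g. "mult_file sfr_reach_pars.dat")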
r_pars[line[0]] = line[1] if bak_sfr_file is None: # will be the case is seg_pars is false bak_sfr_file = r_pars["nam_file"] + "_backup_.sfr" # m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False) m = flopy.modflow.Modflow.load( r_pars["nam_file"], load_only=[], check=False ) sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m) sfrfile = r_pars["sfr_filename"] r_mlt_df = pd.read_csv(r_pars["mult_file"], sep=",", index_col=0) r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"] r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols) r_df = pd.DataFrame.from_records(m.sfr.reach_data) r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols] sfr.reach_data = r_df.to_records(index=False) # m.remove_package("sfr") if pars is not None and "time_mult_file" in pars: time_mult_file = pars["time_mult_file"] time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0) for kper, sdata in m.sfr.segment_data.items(): assert kper in time_mlt_df.index, ( "gw_utils.apply_sfr_seg_parameters() error: kper " + "{0} not in time_mlt_df index".format(kper) ) for col in time_mlt_df.columns: sdata[col] *= time_mlt_df.loc[kper, col] sfr.write_file(filename=sfrfile) return sfr def apply_sfr_parameters(seg_pars=True, reach_pars=False): """thin wrapper around `gw_utils.apply_sfr_seg_parameters()` Args: seg_pars (`bool`, optional): flag to apply segment-based parameters. Default is True reach_pars (`bool`, optional): flag to apply reach-based parameters. Default is False Returns: **flopy.modflow.ModflowSfr**: the modified SFR package instance Note: Expects "sfr_seg_pars.config" to exist Expects `nam_file` +"_backup_.sfr" to exist """ sfr = apply_sfr_seg_parameters(seg_pars=seg_pars, reach_pars=reach_pars) return sfr def setup_sfr_obs( sfr_out_file, seg_group_dict=None, ins_file=None, model=None, include_path=False ): """setup observations using the sfr ASCII output file. Setups the ability to aggregate flows for groups of segments. Applies only flow to aquier and flow out. Args: sft_out_file (`str`): the name and path to an existing SFR output file seg_group_dict (`dict`): a dictionary of SFR segements to aggregate together for a single obs. the key value in the dict is the base observation name. If None, all segments are used as individual observations. Default is None model (`flopy.mbase`): a flopy model. If passed, the observation names will have the datetime of the observation appended to them. If None, the observation names will have the stress period appended to them. Default is None. include_path (`bool`): flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up process in separate directory for where python is running. Returns: **pandas.DataFrame**: dataframe of observation name, simulated value and group. Note: This is the companion function of `gw_utils.apply_sfr_obs()`. This function writes "sfr_obs.config" which must be kept in the dir where "gw_utils.apply_sfr_obs()" is being called during the forward run """ sfr_dict = load_sfr_out(sfr_out_file) kpers = list(sfr_dict.keys()) kpers.sort() if seg_group_dict is None: seg_group_dict = {"seg{0:04d}".format(s): s for s in sfr_dict[kpers[0]].segment} else: warnings.warn( "Flow out (flout) of grouped segments will be aggregated... 
", PyemuWarning ) sfr_segs = set(sfr_dict[list(sfr_dict.keys())[0]].segment) keys = ["sfr_out_file"] if include_path: values = [os.path.split(sfr_out_file)[-1]] else: values = [sfr_out_file] for oname, segs in seg_group_dict.items(): if np.isscalar(segs): segs_set = {segs} segs = [segs] else: segs_set = set(segs) diff = segs_set.difference(sfr_segs) if len(diff) > 0: raise Exception( "the following segs listed with oname {0} where not found: {1}".format( oname, ",".join([str(s) for s in diff]) ) ) for seg in segs: keys.append(oname) values.append(seg) df_key = pd.DataFrame({"obs_base": keys, "segment": values}) if include_path: pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]]) config_file = os.path.join(pth, "sfr_obs.config") else: config_file = "sfr_obs.config" print("writing 'sfr_obs.config' to {0}".format(config_file)) df_key.to_csv(config_file) bd = "." if include_path: bd = os.getcwd() os.chdir(pth) try: df = apply_sfr_obs() except Exception as e: os.chdir(bd) raise Exception("error in apply_sfr_obs(): {0}".format(str(e))) os.chdir(bd) if model is not None: dts = ( pd.to_datetime(model.start_datetime) + pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d") ).date df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x]) df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d")) else: df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x)) df.loc[:, "flaqx_obsnme"] = df.apply( lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1 ) df.loc[:, "flout_obsnme"] = df.apply( lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1 ) if ins_file is None: ins_file = sfr_out_file + ".processed.ins" with open(ins_file, "w") as f: f.write("pif ~\nl1\n") for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme): f.write("l1 w w !{0}! !{1}!\n".format(fla, flo)) df = None pth = os.path.split(ins_file)[:-1] pth = os.path.join(*pth) if pth == "": pth = "." bd = os.getcwd() os.chdir(pth) df = try_process_output_file( os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1] ) os.chdir(bd) if df is not None: df.loc[:, "obsnme"] = df.index.values df.loc[:, "obgnme"] = df.obsnme.apply( lambda x: "flaqx" if x.startswith("fa") else "flout" ) return df def apply_sfr_obs(): """apply the sfr observation process Args: None Returns: **pandas.DataFrame**: a dataframe of aggregrated sfr segment aquifer and outflow Note: This is the companion function of `gw_utils.setup_sfr_obs()`. Requires `sfr_obs.config`. Writes `sfr_out_file`+".processed", where `sfr_out_file` is defined in "sfr_obs.config" """ assert os.path.exists("sfr_obs.config") df_key = pd.read_csv("sfr_obs.config", index_col=0) assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :] sfr_out_file = df_key.iloc[0, 1] df_key = df_key.iloc[1:, :] df_key.loc[:, "segment"] = df_key.segment.apply(np.int) df_key.index = df_key.segment seg_group_dict = df_key.groupby(df_key.obs_base).groups sfr_kper = load_sfr_out(sfr_out_file) kpers = list(sfr_kper.keys()) kpers.sort() # results = {o:[] for o in seg_group_dict.keys()} results = [] for kper in kpers: df = sfr_kper[kper] for obs_base, segs in seg_group_dict.items(): agg = df.loc[ segs.values, : ].sum() # still agg flout where seg groups are passed! 
# print(obs_base,agg) results.append([kper, obs_base, agg["flaqx"], agg["flout"]]) df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"]) df.sort_values(by=["kper", "obs_base"], inplace=True) df.to_csv(sfr_out_file + ".processed", sep=" ", index=False) return df def load_sfr_out(sfr_out_file, selection=None): """load an ASCII SFR output file into a dictionary of kper: dataframes. Args: sfr_out_file (`str`): SFR ASCII output file selection (`pandas.DataFrame`): a dataframe of `reach` and `segment` pairs to load. If `None`, all reach-segment pairs are loaded. Default is `None`. Returns: **dict**: dictionary of {kper:`pandas.DataFrame`} of SFR output. Note: Aggregates flow to aquifer for segments and returns and flow out at downstream end of segment. """ assert os.path.exists(sfr_out_file), "couldn't find sfr out file {0}".format( sfr_out_file ) tag = " stream listing" lcount = 0 sfr_dict = {} if selection is None: pass elif isinstance(selection, str): assert ( selection == "all" ), "If string passed as selection only 'all' allowed: " "{}".format(selection) else: assert isinstance( selection, pd.DataFrame ), "'selection needs to be pandas Dataframe. " "Type {} passed.".format( type(selection) ) assert np.all( [sr in selection.columns for sr in ["segment", "reach"]] ), "Either 'segment' or 'reach' not in selection columns" with open(sfr_out_file) as f: while True: line = f.readline().lower() lcount += 1 if line == "": break if line.startswith(tag): raw = line.strip().split() kper = int(raw[3]) - 1 kstp = int(raw[5]) - 1 [f.readline() for _ in range(4)] # skip to where the data starts lcount += 4 dlines = [] while True: dline = f.readline() lcount += 1 if dline.strip() == "": break draw = dline.strip().split() dlines.append(draw) df = pd.DataFrame(data=np.array(dlines)).iloc[:, [3, 4, 6, 7]] df.columns = ["segment", "reach", "flaqx", "flout"] df["segment"] = df.segment.astype(np.int) df["reach"] = df.reach.astype(np.int) df["flaqx"] = df.flaqx.astype(np.float) df["flout"] = df.flout.astype(np.float) df.index = [ "{0:03d}_{1:03d}".format(s, r) for s, r in np.array([df.segment.values, df.reach.values]).T ] # df.index = df.apply( # lambda x: "{0:03d}_{1:03d}".format( # int(x.segment), int(x.reach)), axis=1) if selection is None: # setup for all segs, aggregate gp = df.groupby(df.segment) bot_reaches = ( gp[["reach"]] .max() .apply( lambda x: "{0:03d}_{1:03d}".format( int(x.name), int(x.reach) ), axis=1, ) ) # only sum distributed output # take flow out of seg df2 = pd.DataFrame( { "flaqx": gp.flaqx.sum(), "flout": df.loc[bot_reaches, "flout"].values, }, index=gp.groups.keys(), ) # df = df.groupby(df.segment).sum() df2["segment"] = df2.index elif isinstance(selection, str) and selection == "all": df2 = df else: seg_reach_id = selection.apply( lambda x: "{0:03d}_{1:03d}".format( int(x.segment), int(x.reach) ), axis=1, ).values for sr in seg_reach_id: if sr not in df.index: s, r = [x.lstrip("0") for x in sr.split("_")] warnings.warn( "Requested segment reach pair ({0},{1}) " "is not in sfr output. Dropping...".format( int(r), int(s) ), PyemuWarning, ) seg_reach_id = np.delete( seg_reach_id, np.where(seg_reach_id == sr), axis=0 ) df2 = df.loc[seg_reach_id].copy() if kper in sfr_dict.keys(): print( "multiple entries found for kper {0}, " "replacing...".format(kper) ) sfr_dict[kper] = df2 return sfr_dict def setup_sfr_reach_obs( sfr_out_file, seg_reach=None, ins_file=None, model=None, include_path=False ): """setup observations using the sfr ASCII output file. 
Setups sfr point observations using segment and reach numbers. Args: sft_out_file (`str`): the path and name of an existing SFR output file seg_reach (varies): a dict, or list of SFR [segment,reach] pairs identifying locations of interest. If `dict`, the key value in the dict is the base observation name. If None, all reaches are used as individual observations. Default is None - THIS MAY SET UP A LOT OF OBS! model (`flopy.mbase`): a flopy model. If passed, the observation names will have the datetime of the observation appended to them. If None, the observation names will have the stress period appended to them. Default is None. include_path (`bool`): a flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up process in separate directory for where python is running. Returns: `pd.DataFrame`: a dataframe of observation names, values, and groups Note: This is the companion function of `gw_utils.apply_sfr_reach_obs()`. This function writes "sfr_reach_obs.config" which must be kept in the dir where "apply_sfr_reach_obs()" is being called during the forward run """ if seg_reach is None: warnings.warn("Obs will be set up for every reach", PyemuWarning) seg_reach = "all" elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray): if np.ndim(seg_reach) == 1: seg_reach = [seg_reach] assert ( np.shape(seg_reach)[1] == 2 ), "varible seg_reach expected shape (n,2), received {0}".format( np.shape(seg_reach) ) seg_reach = pd.DataFrame(seg_reach, columns=["segment", "reach"]) seg_reach.index = seg_reach.apply( lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1 ) elif isinstance(seg_reach, dict): seg_reach = pd.DataFrame.from_dict( seg_reach, orient="index", columns=["segment", "reach"] ) else: assert isinstance( seg_reach, pd.DataFrame ), "'selection needs to be pandas Dataframe. Type {} passed.".format( type(seg_reach) ) assert np.all( [sr in seg_reach.columns for sr in ["segment", "reach"]] ), "Either 'segment' or 'reach' not in selection columns" sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach) kpers = list(sfr_dict.keys()) kpers.sort() if isinstance(seg_reach, str) and seg_reach == "all": seg_reach = sfr_dict[kpers[0]][["segment", "reach"]] seg_reach.index = seg_reach.apply( lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1 ) keys = ["sfr_out_file"] if include_path: values = [os.path.split(sfr_out_file)[-1]] else: values = [sfr_out_file] diff = seg_reach.loc[ seg_reach.apply( lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach)) not in sfr_dict[list(sfr_dict.keys())[0]].index, axis=1, ) ] if len(diff) > 0: for ob in diff.itertuples(): warnings.warn( "segs,reach pair listed with onames {0} was not found: {1}".format( ob.Index, "({},{})".format(ob.segment, ob.reach) ), PyemuWarning, ) seg_reach = seg_reach.drop(diff.index) seg_reach["obs_base"] = seg_reach.index df_key = pd.DataFrame({"obs_base": keys, "segment": 0, "reach": values}) df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True) if include_path: pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]]) config_file = os.path.join(pth, "sfr_reach_obs.config") else: config_file = "sfr_reach_obs.config" print("writing 'sfr_reach_obs.config' to {0}".format(config_file)) df_key.to_csv(config_file) bd = "." 
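    # when include_path is True, temporarily chdir into the directory holding
    # sfr_reach_obs.config so apply_sfr_reach_obs() can find it via a relative
    # path, then restore the original working directory afterwards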
if include_path: bd = os.getcwd() os.chdir(pth) try: df = apply_sfr_reach_obs() except Exception as e: os.chdir(bd) raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e))) os.chdir(bd) if model is not None: dts = ( pd.to_datetime(model.start_datetime) + pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d") ).date df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x]) df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d")) else: df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x)) df.loc[:, "flaqx_obsnme"] = df.apply( lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1 ) df.loc[:, "flout_obsnme"] = df.apply( lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1 ) if ins_file is None: ins_file = sfr_out_file + ".reach_processed.ins" with open(ins_file, "w") as f: f.write("pif ~\nl1\n") for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme): f.write("l1 w w !{0}! !{1}!\n".format(fla, flo)) df = None pth = os.path.split(ins_file)[:-1] pth = os.path.join(*pth) if pth == "": pth = "." bd = os.getcwd() os.chdir(pth) try: df = try_process_output_file( os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1] ) except Exception as e: pass os.chdir(bd) if df is not None: df.loc[:, "obsnme"] = df.index.values df.loc[:, "obgnme"] = df.obsnme.apply( lambda x: "flaqx" if x.startswith("fa") else "flout" ) return df def apply_sfr_reach_obs(): """apply the sfr reach observation process. Returns: `pd.DataFrame`: a dataframe of sfr aquifer and outflow ad segment,reach locations Note: This is the companion function of `gw_utils.setup_sfr_reach_obs()`. Requires sfr_reach_obs.config. Writes <sfr_out_file>.processed, where <sfr_out_file> is defined in "sfr_reach_obs.config" """ assert os.path.exists("sfr_reach_obs.config") df_key = pd.read_csv("sfr_reach_obs.config", index_col=0) assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :] sfr_out_file = df_key.iloc[0].reach df_key = df_key.iloc[1:, :].copy() df_key.loc[:, "segment"] = df_key.segment.apply(np.int) df_key.loc[:, "reach"] = df_key.reach.apply(np.int) df_key = df_key.set_index("obs_base") sfr_kper = load_sfr_out(sfr_out_file, df_key) kpers = list(sfr_kper.keys()) kpers.sort() results = [] for kper in kpers: df = sfr_kper[kper] for sr in df_key.itertuples(): ob = df.loc["{0:03d}_{1:03d}".format(sr.segment, sr.reach), :] results.append([kper, sr.Index, ob["flaqx"], ob["flout"]]) df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"]) df.sort_values(by=["kper", "obs_base"], inplace=True) df.to_csv(sfr_out_file + ".reach_processed", sep=" ", index=False) return df def modflow_sfr_gag_to_instruction_file( gage_output_file, ins_file=None, parse_filename=False ): """writes an instruction file for an SFR gage output file to read Flow only at all times Args: gage_output_file (`str`): the gage output filename (ASCII). ins_file (`str`, optional): the name of the instruction file to create. If None, the name is `gage_output_file` +".ins". Default is None parse_filename (`bool`): if True, get the gage_num parameter by parsing the gage output file filename if False, get the gage number from the file itself Returns: tuple containing - **pandas.DataFrame**: a dataframe with obsnme and obsval for the sfr simulated flows. - **str**: file name of instructions file relating to gage output. - **str**: file name of processed gage output for all times Note: Sets up observations for gage outputs only for the Flow column. 
If `parse_namefile` is true, only text up to first '.' is used as the gage_num """ if ins_file is None: ins_file = gage_output_file + ".ins" # navigate the file to be sure the header makes sense indat = [line.strip() for line in open(gage_output_file, "r").readlines()] header = [i for i in indat if i.startswith('"')] # yank out the gage number to identify the observation names if parse_filename: gage_num = os.path.basename(gage_output_file).split(".")[0] else: gage_num = re.sub( "[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0] ) # get the column names cols = ( [i.lower() for i in header if "data" in i.lower()][0] .lower() .replace('"', "") .replace("data:", "") .split() ) # make sure "Flow" is included in the columns if "flow" not in cols: raise Exception('Requested field "Flow" not in gage output columns') # find which column is for "Flow" flowidx = np.where(np.array(cols) == "flow")[0][0] # write out the instruction file lines inslines = [ "l1 " + (flowidx + 1) * "w " + "!g{0}_{1:d}!".format(gage_num, j) for j in range(len(indat) - len(header)) ] inslines[0] = inslines[0].replace("l1", "l{0:d}".format(len(header) + 1)) # write the instruction file with open(ins_file, "w") as ofp: ofp.write("pif ~\n") [ofp.write("{0}\n".format(line)) for line in inslines] df = try_process_output_file(ins_file, gage_output_file) return df, ins_file, gage_output_file def setup_gage_obs(gage_file, ins_file=None, start_datetime=None, times=None): """setup a forward run post processor routine for the modflow gage file Args: gage_file (`str`): the gage output file (ASCII) ins_file (`str`, optional): the name of the instruction file to create. If None, the name is `gage_file`+".processed.ins". Default is `None` start_datetime (`str`): a `pandas.to_datetime()` compatible `str`. If not `None`, then the resulting observation names have the datetime suffix. If `None`, the suffix is the output totim. Default is `None`. times ([`float`]): a container of times to make observations for. If None, all times are used. Default is None. Returns: tuple containing - **pandas.DataFrame**: a dataframe with observation name and simulated values for the values in the gage file. - **str**: file name of instructions file that was created relating to gage output. - **str**: file name of processed gage output (processed according to times passed above.) Note: Setups up observations for gage outputs (all columns). This is the companion function of `gw_utils.apply_gage_obs()` """ with open(gage_file, "r") as f: line1 = f.readline() gage_num = int( re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0]) ) gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower() obj_num = int(line1.replace('"', "").strip().split()[-1]) line2 = f.readline() df = pd.read_csv( f, delim_whitespace=True, names=line2.replace('"', "").split()[1:] ) df.columns = [ c.lower().replace("-", "_").replace(".", "_").strip("_") for c in df.columns ] # get unique observation ids obs_ids = { col: "" for col in df.columns[1:] } # empty dictionary for observation ids for col in df.columns[1:]: # exclude column 1 (TIME) colspl = col.split("_") if len(colspl) > 1: # obs name built out of "g"(for gage) "s" or "l"(for gage type) 2 chars from column name - date added later obs_ids[col] = "g{0}{1}{2}".format( gage_type[0], colspl[0][0], colspl[-1][0] ) else: obs_ids[col] = "g{0}{1}".format(gage_type[0], col[0:2]) with open( "_gage_obs_ids.csv", "w" ) as f: # write file relating obs names to meaningfull keys! 
[f.write("{0},{1}\n".format(key, obs)) for key, obs in obs_ids.items()] # find passed times in df if times is None: times = df.time.unique() missing = [] utimes = df.time.unique() for t in times: if not np.isclose(t, utimes).any(): missing.append(str(t)) if len(missing) > 0: print(df.time) raise Exception("the following times are missing:{0}".format(",".join(missing))) # write output times to config file with open("gage_obs.config", "w") as f: f.write(gage_file + "\n") [f.write("{0:15.10E}\n".format(t)) for t in times] # extract data for times: returns dataframe and saves a processed df - read by pest df, obs_file = apply_gage_obs(return_obs_file=True) utimes = df.time.unique() for t in times: assert np.isclose( t, utimes ).any(), "time {0} missing in processed dataframe".format(t) idx = df.time.apply( lambda x: np.isclose(x, times).any() ) # boolean selector of desired times in df if start_datetime is not None: # convert times to usable observation times start_datetime = pd.to_datetime(start_datetime) df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime df.loc[:, "time_str"] = df.time_str.apply( lambda x: datetime.strftime(x, "%Y%m%d") ) else: df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x)) # set up instructions (line feed for lines without obs (not in time) df.loc[:, "ins_str"] = "l1\n" df_times = df.loc[idx, :] # Slice by desired times # TODO include GAGE No. in obs name (if permissible) df.loc[df_times.index, "ins_str"] = df_times.apply( lambda x: "l1 w {}\n".format( " w ".join( ["!{0}{1}!".format(obs, x.time_str) for key, obs in obs_ids.items()] ) ), axis=1, ) df.index = np.arange(df.shape[0]) if ins_file is None: ins_file = gage_file + ".processed.ins" with open(ins_file, "w") as f: f.write("pif ~\nl1\n") [f.write(i) for i in df.ins_str] df = try_process_output_file(ins_file, gage_file + ".processed") return df, ins_file, obs_file def apply_gage_obs(return_obs_file=False): """apply the modflow gage obs post-processor Args: return_obs_file (`bool`): flag to return the processed observation file. Default is `False`. Note: This is the companion function of `gw_utils.setup_gage_obs()` """ times = [] with open("gage_obs.config") as f: gage_file = f.readline().strip() for line in f: times.append(float(line.strip())) obs_file = gage_file + ".processed" with open(gage_file, "r") as f: line1 = f.readline() gage_num = int( re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0]) ) gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower() obj_num = int(line1.replace('"', "").strip().split()[-1]) line2 = f.readline() df = pd.read_csv( f, delim_whitespace=True, names=line2.replace('"', "").split()[1:] ) df.columns = [c.lower().replace("-", "_").replace(".", "_") for c in df.columns] df = df.loc[df.time.apply(lambda x: np.isclose(x, times).any()), :] df.to_csv(obs_file, sep=" ", index=False) if return_obs_file: return df, obs_file else: return df def apply_hfb_pars(par_file="hfb6_pars.csv"): """a function to apply HFB multiplier parameters. Args: par_file (`str`): the HFB parameter info file. 
Default is `hfb_pars.csv` Note: This is the companion function to `gw_utils.write_hfb_zone_multipliers_template()` This is to account for the horrible HFB6 format that differs from other BCs making this a special case Requires "hfb_pars.csv" Should be added to the forward_run.py script """ hfb_pars = pd.read_csv(par_file) hfb_mults_contents = open(hfb_pars.mlt_file.values[0], "r").readlines() skiprows = ( sum([1 if i.strip().startswith("#") else 0 for i in hfb_mults_contents]) + 1 ) header = hfb_mults_contents[:skiprows] # read in the multipliers names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"] hfb_mults = pd.read_csv( hfb_pars.mlt_file.values[0], skiprows=skiprows, delim_whitespace=True, names=names, ).dropna() # read in the original file hfb_org = pd.read_csv( hfb_pars.org_file.values[0], skiprows=skiprows, delim_whitespace=True, names=names, ).dropna() # multiply it out hfb_org.hydchr *= hfb_mults.hydchr for cn in names[:-1]: hfb_mults[cn] = hfb_mults[cn].astype(np.int) hfb_org[cn] = hfb_org[cn].astype(np.int) # write the results with open(hfb_pars.model_file.values[0], "w", newline="") as ofp: [ofp.write("{0}\n".format(line.strip())) for line in header] ofp.flush() hfb_org[["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]].to_csv( ofp, sep=" ", header=None, index=None ) def write_hfb_zone_multipliers_template(m): """write a template file for an hfb using multipliers per zone (double yuck!) Args: m (`flopy.modflow.Modflow`): a model instance with an HFB package Returns: tuple containing - **dict**: a dictionary with original unique HFB conductivity values and their corresponding parameter names - **str**: the template filename that was created """ if m.hfb6 is None: raise Exception("no HFB package found") # find the model file hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0]) # this will use multipliers, so need to copy down the original if not os.path.exists(os.path.join(m.model_ws, "hfb6_org")): os.mkdir(os.path.join(m.model_ws, "hfb6_org")) # copy down the original file shutil.copy2( os.path.join(m.model_ws, m.hfb6.file_name[0]), os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]), ) if not os.path.exists(os.path.join(m.model_ws, "hfb6_mlt")): os.mkdir(os.path.join(m.model_ws, "hfb6_mlt")) # read in the model file hfb_file_contents = open(hfb_file, "r").readlines() # navigate the header skiprows = ( sum([1 if i.strip().startswith("#") else 0 for i in hfb_file_contents]) + 1 ) header = hfb_file_contents[:skiprows] # read in the data names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"] hfb_in = pd.read_csv( hfb_file, skiprows=skiprows, delim_whitespace=True, names=names ).dropna() for cn in names[:-1]: hfb_in[cn] = hfb_in[cn].astype(np.int) # set up a multiplier for each unique conductivity value unique_cond = hfb_in.hydchr.unique() hfb_mults = dict( zip(unique_cond, ["hbz_{0:04d}".format(i) for i in range(len(unique_cond))]) ) # set up the TPL line for each parameter and assign hfb_in["tpl"] = "blank" for cn, cg in hfb_in.groupby("hydchr"): hfb_in.loc[hfb_in.hydchr == cn, "tpl"] = "~{0:^10s}~".format(hfb_mults[cn]) assert "blank" not in hfb_in.tpl # write out the TPL file tpl_file = os.path.join(m.model_ws, "hfb6.mlt.tpl") with open(tpl_file, "w", newline="") as ofp: ofp.write("ptf ~\n") [ofp.write("{0}\n".format(line.strip())) for line in header] ofp.flush() hfb_in[["lay", "irow1", "icol1", "irow2", "icol2", "tpl"]].to_csv( ofp, sep=" ", quotechar=" ", header=None, index=None, mode="a" ) # make a lookup for lining up the necessary files to # 
perform multiplication with the helpers.apply_hfb_pars() function # which must be added to the forward run script with open(os.path.join(m.model_ws, "hfb6_pars.csv"), "w") as ofp: ofp.write("org_file,mlt_file,model_file\n") ofp.write( "{0},{1},{2}\n".format( os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]), os.path.join( m.model_ws, "hfb6_mlt", os.path.basename(tpl_file).replace(".tpl", ""), ), hfb_file, ) ) return hfb_mults, tpl_file def write_hfb_template(m): """write a template file for an hfb (yuck!) Args: m (`flopy.modflow.Modflow`): a model instance with an HFB package Returns: tuple containing - **str**: name of the template file that was created - **pandas.DataFrame**: a dataframe with use control file info for the HFB parameters """ assert m.hfb6 is not None hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0]) assert os.path.exists(hfb_file), "couldn't find hfb_file {0}".format(hfb_file) f_in = open(hfb_file, "r") tpl_file = hfb_file + ".tpl" f_tpl = open(tpl_file, "w") f_tpl.write("ptf ~\n") parnme, parval1, xs, ys = [], [], [], [] iis, jjs, kks = [], [], [] xc = m.sr.xcentergrid yc = m.sr.ycentergrid while True: line = f_in.readline() if line == "": break f_tpl.write(line) if not line.startswith("#"): raw = line.strip().split() nphfb = int(raw[0]) mxfb = int(raw[1]) nhfbnp = int(raw[2]) if nphfb > 0 or mxfb > 0: raise Exception("not supporting terrible HFB pars") for i in range(nhfbnp): line = f_in.readline() if line == "": raise Exception("EOF") raw = line.strip().split() k = int(raw[0]) - 1 i = int(raw[1]) - 1 j = int(raw[2]) - 1 pn = "hb{0:02}{1:04d}{2:04}".format(k, i, j) pv = float(raw[5]) raw[5] = "~ {0} ~".format(pn) line = " ".join(raw) + "\n" f_tpl.write(line) parnme.append(pn) parval1.append(pv) xs.append(xc[i, j]) ys.append(yc[i, j]) iis.append(i) jjs.append(j) kks.append(k) break f_tpl.close() f_in.close() df = pd.DataFrame( { "parnme": parnme, "parval1": parval1, "x": xs, "y": ys, "i": iis, "j": jjs, "k": kks, }, index=parnme, ) df.loc[:, "pargp"] = "hfb_hydfac" df.loc[:, "parubnd"] = df.parval1.max() * 10.0 df.loc[:, "parlbnd"] = df.parval1.min() * 0.1 return tpl_file, df class GsfReader: """ a helper class to read a standard modflow-usg gsf file Args: gsffilename (`str`): filename """ def __init__(self, gsffilename): with open(gsffilename, "r") as f: self.read_data = f.readlines() self.nnode, self.nlay, self.iz, self.ic = [ int(n) for n in self.read_data[1].split() ] self.nvertex = int(self.read_data[2]) def get_vertex_coordinates(self): """ Returns: Dictionary containing list of x, y and z coordinates for each vertex """ # vdata = self.read_data[3:self.nvertex+3] vertex_coords = {} for vert in range(self.nvertex): x, y, z = self.read_data[3 + vert].split() vertex_coords[vert + 1] = [float(x), float(y), float(z)] return vertex_coords def get_node_data(self): """ Returns: nodedf: a pd.DataFrame containing Node information; Node, X, Y, Z, layer, numverts, vertidx """ node_data = [] for node in range(self.nnode): nid, x, y, z, lay, numverts = self.read_data[ self.nvertex + 3 + node ].split()[:6] # vertidx = {'ivertex': [int(n) for n in self.read_data[self.nvertex+3 + node].split()[6:]]} vertidx = [ int(n) for n in self.read_data[self.nvertex + 3 + node].split()[6:] ] node_data.append( [ int(nid), float(x), float(y), float(z), int(lay), int(numverts), vertidx, ] ) nodedf = pd.DataFrame( node_data, columns=["node", "x", "y", "z", "layer", "numverts", "vertidx"] ) return nodedf def get_node_coordinates(self, zcoord=False, zero_based=False): """ Args: zcoord 
(`bool`): flag to add z coord to coordinates. Default is False zero_based (`bool`): flag to subtract one from the node numbers in the returned node_coords dict. This is needed to support PstFrom. Default is False Returns: node_coords: Dictionary containing x and y coordinates for each node """ node_coords = {} for node in range(self.nnode): nid, x, y, z, lay, numverts = self.read_data[ self.nvertex + 3 + node ].split()[:6] nid = int(nid) if zero_based: nid -= 1 node_coords[nid] = [float(x), float(y)] if zcoord: node_coords[nid] += [float(z)] return node_coords
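# ---------------------------------------------------------------------------
# Minimal usage sketch for GsfReader (illustrative only): "freyberg.usg.gsf" is
# a hypothetical file name standing in for a real MODFLOW-USG grid specification
# file - substitute the path to your own gsf file before running.
if __name__ == "__main__":
    _gsf_path = "freyberg.usg.gsf"  # hypothetical example path
    if os.path.exists(_gsf_path):
        gsf = GsfReader(_gsf_path)
        print("nnode: {0}, nlay: {1}, nvertex: {2}".format(
            gsf.nnode, gsf.nlay, gsf.nvertex))
        # node number (zero-based here) -> [x, y, z]
        node_coords = gsf.get_node_coordinates(zcoord=True, zero_based=True)
        node_df = gsf.get_node_data()
        print(node_df.head())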
bsd-3-clause
belkinsky/SFXbot
src/pyAudioAnalysis/audioTrainTest.py
1
46228
import sys import numpy import time import os import glob import pickle import shutil import audioop import signal import csv import ntpath from . import audioFeatureExtraction as aF from . import audioBasicIO from matplotlib.mlab import find import matplotlib.pyplot as plt import scipy.io as sIO from scipy import linalg as la from scipy.spatial import distance import sklearn.svm import sklearn.decomposition import sklearn.ensemble def signal_handler(signal, frame): print('You pressed Ctrl+C! - EXIT') os.system("stty -cbreak echo") sys.exit(0) signal.signal(signal.SIGINT, signal_handler) shortTermWindow = 0.050 shortTermStep = 0.050 eps = 0.00000001 class kNN: def __init__(self, X, Y, k): self.X = X self.Y = Y self.k = k def classify(self, testSample): nClasses = numpy.unique(self.Y).shape[0] YDist = (distance.cdist(self.X, testSample.reshape(1, testSample.shape[0]), 'euclidean')).T iSort = numpy.argsort(YDist) P = numpy.zeros((nClasses,)) for i in range(nClasses): P[i] = numpy.nonzero(self.Y[iSort[0][0:self.k]] == i)[0].shape[0] / float(self.k) return (numpy.argmax(P), P) def classifierWrapper(classifier, classifierType, testSample): ''' This function is used as a wrapper to pattern classification. ARGUMENTS: - classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier - classifierType: "svm" or "knn" or "randomforests" or "gradientboosting" or "extratrees" - testSample: a feature vector (numpy array) RETURNS: - R: class ID - P: probability estimate EXAMPLE (for some audio signal stored in array x): import audioFeatureExtraction as aF import audioTrainTest as aT # load the classifier (here SVM, for kNN use loadKNNModel instead): [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep] = aT.loadSVModel(modelName) # mid-term feature extraction: [MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep)); # feature normalization: curFV = (MidTermFeatures[:, i] - MEAN) / STD; # classification [Result, P] = classifierWrapper(Classifier, modelType, curFV) ''' R = -1 P = -1 if classifierType == "knn": [R, P] = classifier.classify(testSample) elif classifierType == "svm" or classifierType == "randomforest" or classifierType == "gradientboosting" or "extratrees": R = classifier.predict(testSample.reshape(1,-1))[0] P = classifier.predict_proba(testSample.reshape(1,-1))[0] return [R, P] def regressionWrapper(model, modelType, testSample): ''' This function is used as a wrapper to pattern classification. ARGUMENTS: - model: regression model - modelType: "svm" or "knn" (TODO) - testSample: a feature vector (numpy array) RETURNS: - R: regression result (estimated value) EXAMPLE (for some audio signal stored in array x): TODO ''' if modelType == "svm" or modelType == "randomforest": return (model.predict(testSample.reshape(1,-1))[0]) # elif classifierType == "knn": # TODO return None def randSplitFeatures(features, partTrain): ''' def randSplitFeatures(features): This function splits a feature set for training and testing. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. 
each matrix features[i] of class i is [numOfSamples x numOfDimensions] - partTrain: percentage RETURNS: - featuresTrains: a list of training data for each class - featuresTest: a list of testing data for each class ''' featuresTrain = [] featuresTest = [] for i, f in enumerate(features): [numOfSamples, numOfDims] = f.shape randperm = numpy.random.permutation(list(range(numOfSamples))) nTrainSamples = int(round(partTrain * numOfSamples)) featuresTrain.append(f[randperm[0:nTrainSamples]]) featuresTest.append(f[randperm[nTrainSamples::]]) return (featuresTrain, featuresTest) def trainKNN(features, K): ''' Train a kNN classifier. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. each matrix features[i] of class i is [numOfSamples x numOfDimensions] - K: parameter K RETURNS: - kNN: the trained kNN variable ''' [Xt, Yt] = listOfFeatures2Matrix(features) knn = kNN(Xt, Yt, K) return knn def trainSVM(features, Cparam): ''' Train a multi-class probabilitistic SVM classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - Cparam: SVM parameter C (cost of constraints violation) RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear', probability = True) svm.fit(X,Y) return svm def trainRandomForest(features, n_estimators): ''' Train a multi-class decision tree classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators) rf.fit(X,Y) return rf def trainGradientBoosting(features, n_estimators): ''' Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. 
''' [X, Y] = listOfFeatures2Matrix(features) rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators = n_estimators) rf.fit(X,Y) return rf def trainExtraTrees(features, n_estimators): ''' Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for extra tree classifiers See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) et = sklearn.ensemble.ExtraTreesClassifier(n_estimators = n_estimators) et.fit(X,Y) return et def trainSVMregression(Features, Y, Cparam): svm = sklearn.svm.SVR(C = Cparam, kernel = 'linear') print(Features.shape, Y) svm.fit(Features,Y) trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y)) return svm, trainError # TODO (not avaiable for regression?) #def trainRandomForestRegression(Features, Y, n_estimators): # rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators) # print Features.shape, Y # rf.fit(Features,Y) # trainError = numpy.mean(numpy.abs(rf.predict(Features) - Y)) # return rf, trainError def featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, classifierType, modelName, computeBEAT=False, perTrain=0.90): ''' This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: listOfDirs: list of paths of directories. Each directory contains a signle audio class whose samples are stored in seperate WAV files. mtWin, mtStep: mid-term window length and step stWin, stStep: short-term window and step classifierType: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees" modelName: name of the model to be saved RETURNS: None. Resulting classifier along with the respective model parameters are saved on files. 
''' # STEP A: Feature Extraction: [features, classNames, _] = aF.dirsWavFeatureExtraction(listOfDirs, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT) if len(features) == 0: print("trainSVM_feature ERROR: No data found in any input folder!") return numOfFeatures = features[0].shape[1] featureNames = ["features" + str(d + 1) for d in range(numOfFeatures)] writeTrainDataToARFF(modelName, features, classNames, featureNames) for i, f in enumerate(features): if len(f) == 0: print("trainSVM_feature ERROR: " + listOfDirs[i] + " folder is empty or non-existing!") return # STEP B: Classifier Evaluation and Parameter Selection: if classifierType == "svm": classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0]) elif classifierType == "randomforest": classifierParams = numpy.array([10, 25, 50, 100,200,500]) elif classifierType == "knn": classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]) elif classifierType == "gradientboosting": classifierParams = numpy.array([10, 25, 50, 100,200,500]) elif classifierType == "extratrees": classifierParams = numpy.array([10, 25, 50, 100,200,500]) # get optimal classifeir parameter: bestParam = evaluateClassifier(features, classNames, 100, classifierType, classifierParams, 0, perTrain) print("Selected params: {0:.5f}".format(bestParam)) C = len(classNames) [featuresNorm, MEAN, STD] = normalizeFeatures(features) # normalize features MEAN = MEAN.tolist() STD = STD.tolist() featuresNew = featuresNorm # STEP C: Save the classifier to file if classifierType == "svm": Classifier = trainSVM(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "randomforest": Classifier = trainRandomForest(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "gradientboosting": Classifier = trainGradientBoosting(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() 
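    # NOTE: the svm, randomforest, gradientboosting and extratrees branches each
    # pickle the trained model and then a companion modelName + "MEANS" file
    # (MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT), while the
    # knn branch below stores everything in a single pickle; the matching
    # load*Model() functions rely on this layout to reproduce the feature
    # normalization at prediction time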
elif classifierType == "extratrees": Classifier = trainExtraTrees(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "knn": [X, Y] = listOfFeatures2Matrix(featuresNew) X = X.tolist() Y = Y.tolist() fo = open(modelName, "wb") pickle.dump(X, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(Y, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(bestParam, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() def featureAndTrainRegression(dirName, mtWin, mtStep, stWin, stStep, modelType, modelName, computeBEAT=False): ''' This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: dirName: path of directory containing the WAV files and Regression CSVs mtWin, mtStep: mid-term window length and step stWin, stStep: short-term window and step modelType: "svm" or "knn" or "randomforest" modelName: name of the model to be saved RETURNS: None. Resulting regression model along with the respective model parameters are saved on files. ''' # STEP A: Feature Extraction: [features, _, fileNames] = aF.dirsWavFeatureExtraction([dirName], mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT) features = features[0] fileNames = [ntpath.basename(f) for f in fileNames[0]] # Read CSVs: CSVs = glob.glob(dirName + os.sep + "*.csv") regressionLabels = [] regressionNames = [] for c in CSVs: # for each CSV curRegressionLabels = numpy.zeros((len(fileNames, ))) # read filenames, map to "fileNames" and append respective values in the regressionLabels with open(c, 'rb') as csvfile: CSVreader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in CSVreader: if len(row) == 2: if row[0]+".wav" in fileNames: index = fileNames.index(row[0]+".wav") curRegressionLabels[index] = float(row[1]) regressionLabels.append(curRegressionLabels) # curRegressionLabels is the list of values for the current regression problem regressionNames.append(ntpath.basename(c).replace(".csv", "")) # regression task name if len(features) == 0: print("ERROR: No data found in any input folder!") return numOfFeatures = features.shape[1] # TODO: ARRF WRITE???? 
# STEP B: Classifier Evaluation and Parameter Selection: if modelType == "svm": modelParams = numpy.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0]) elif modelType == "randomforest": modelParams = numpy.array([5, 10, 25, 50, 100]) # elif modelType == "knn": # modelParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]); for iRegression, r in enumerate(regressionNames): # get optimal classifeir parameter: print("Regression task " + r) bestParam = evaluateRegression(features, regressionLabels[iRegression], 100, modelType, modelParams) print("Selected params: {0:.5f}".format(bestParam)) [featuresNorm, MEAN, STD] = normalizeFeatures([features]) # normalize features # STEP C: Save the model to file if modelType == "svm": Classifier, _ = trainSVMregression(featuresNorm[0], regressionLabels[iRegression], bestParam) with open(modelName + "_" + r, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "_" + r + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() ''' TODO elif modelType == "randomforest": Classifier, _ = trainRandomForestRegression(featuresNorm[0], regressionLabels[iRegression], bestParam) with open(modelName + "_" + r, 'wb') as fid: # save to file cPickle.dump(Classifier, fid) fo = open(modelName + "_" + r + "MEANS", "wb") cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() ''' # elif classifierType == "knn": def loadKNNModel(kNNModelName, isRegression=False): try: fo = open(kNNModelName, "rb") except IOError: print("didn't find file") return try: X = pickle.load(fo) Y = pickle.load(fo) MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) K = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() X = numpy.array(X) Y = numpy.array(Y) MEAN = numpy.array(MEAN) STD = numpy.array(STD) Classifier = kNN(X, Y, K) # Note: a direct call to the kNN constructor is used here if isRegression: return(Classifier, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadSVModel(SVMmodelName, isRegression=False): ''' This function loads an SVM model either for classification or training. 
ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(SVMmodelName+"MEANS", "rb") except IOError: print("Load SVM Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(SVMmodelName, 'rb') as fid: SVM = pickle.load(fid) if isRegression: return(SVM, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(SVM, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadRandomForestModel(RFmodelName, isRegression=False): ''' This function loads an SVM model either for classification or training. ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(RFmodelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(RFmodelName, 'rb') as fid: RF = pickle.load(fid) if isRegression: return(RF, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(RF, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadGradientBoostingModel(GBModelName, isRegression=False): ''' This function loads gradient boosting either for classification or training. ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(GBModelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(GBModelName, 'rb') as fid: GB = pickle.load(fid) if isRegression: return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadExtraTreesModel(ETmodelName, isRegression=False): ''' This function loads extra trees either for classification or training. 
ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(ETmodelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(ETmodelName, 'rb') as fid: GB = pickle.load(fid) if isRegression: return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def evaluateClassifier(features, ClassNames, nExp, ClassifierName, Params, parameterMode, perTrain=0.90): ''' ARGUMENTS: features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. each matrix features[i] of class i is [numOfSamples x numOfDimensions] ClassNames: list of class names (strings) nExp: number of cross-validation experiments ClassifierName: svm or knn or randomforest Params: list of classifier parameters (for parameter tuning during cross-validation) parameterMode: 0: choose parameters that lead to maximum overall classification ACCURACY 1: choose parameters that lead to maximum overall F1 MEASURE RETURNS: bestParam: the value of the input parameter that optimizes the selected performance measure ''' # feature normalization: (featuresNorm, MEAN, STD) = normalizeFeatures(features) #featuresNorm = features; nClasses = len(features) CAll = [] acAll = [] F1All = [] PrecisionClassesAll = [] RecallClassesAll = [] ClassesAll = [] F1ClassesAll = [] CMsAll = [] # compute total number of samples: nSamplesTotal = 0 for f in features: nSamplesTotal += f.shape[0] if nSamplesTotal > 1000 and nExp > 50: nExp = 50 print("Number of training experiments changed to 50 due to high number of samples") if nSamplesTotal > 2000 and nExp > 10: nExp = 10 print("Number of training experiments changed to 10 due to high number of samples") for Ci, C in enumerate(Params): # for each param value CM = numpy.zeros((nClasses, nClasses)) for e in range(nExp): # for each cross-validation iteration: print("Param = {0:.5f} - Classifier Evaluation Experiment {1:d} of {2:d}".format(C, e+1, nExp)) # split features: featuresTrain, featuresTest = randSplitFeatures(featuresNorm, perTrain) # train multi-class svms: if ClassifierName == "svm": Classifier = trainSVM(featuresTrain, C) elif ClassifierName == "knn": Classifier = trainKNN(featuresTrain, C) elif ClassifierName == "randomforest": Classifier = trainRandomForest(featuresTrain, C) elif ClassifierName == "gradientboosting": Classifier = trainGradientBoosting(featuresTrain, C) elif ClassifierName == "extratrees": Classifier = trainExtraTrees(featuresTrain, C) CMt = numpy.zeros((nClasses, nClasses)) for c1 in range(nClasses): #Results = Classifier.pred(featuresTest[c1]) nTestSamples = len(featuresTest[c1]) Results = numpy.zeros((nTestSamples, 1)) for ss in range(nTestSamples): [Results[ss], _] = classifierWrapper(Classifier, ClassifierName, featuresTest[c1][ss]) for c2 in range(nClasses): CMt[c1][c2] = float(len(numpy.nonzero(Results == c2)[0])) CM = CM + CMt CM = CM + 0.0000000010 Rec = numpy.zeros((CM.shape[0], )) Pre = numpy.zeros((CM.shape[0], )) for ci in range(CM.shape[0]): Rec[ci] = CM[ci, ci] / numpy.sum(CM[ci, :]) Pre[ci] = CM[ci, ci] / 
numpy.sum(CM[:, ci]) PrecisionClassesAll.append(Pre) RecallClassesAll.append(Rec) F1 = 2 * Rec * Pre / (Rec + Pre) F1ClassesAll.append(F1) acAll.append(numpy.sum(numpy.diagonal(CM)) / numpy.sum(CM)) CMsAll.append(CM) F1All.append(numpy.mean(F1)) # print "{0:6.4f}{1:6.4f}{2:6.1f}{3:6.1f}".format(nu, g, 100.0*acAll[-1], 100.0*F1All[-1]) print(("\t\t"), end=' ') for i, c in enumerate(ClassNames): if i == len(ClassNames)-1: print("{0:s}\t\t".format(c), end=' ') else: print("{0:s}\t\t\t".format(c), end=' ') print ("OVERALL") print(("\tC"), end=' ') for c in ClassNames: print("\tPRE\tREC\tF1", end=' ') print("\t{0:s}\t{1:s}".format("ACC", "F1")) bestAcInd = numpy.argmax(acAll) bestF1Ind = numpy.argmax(F1All) for i in range(len(PrecisionClassesAll)): print("\t{0:.3f}".format(Params[i]), end=' ') for c in range(len(PrecisionClassesAll[i])): print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 * PrecisionClassesAll[i][c], 100.0 * RecallClassesAll[i][c], 100.0 * F1ClassesAll[i][c]), end=' ') print("\t{0:.1f}\t{1:.1f}".format(100.0 * acAll[i], 100.0 * F1All[i]), end=' ') if i == bestF1Ind: print("\t best F1", end=' ') if i == bestAcInd: print("\t best Acc", end=' ') print() if parameterMode == 0: # keep parameters that maximize overall classification accuracy: print("Confusion Matrix:") printConfusionMatrix(CMsAll[bestAcInd], ClassNames) return Params[bestAcInd] elif parameterMode == 1: # keep parameters that maximize overall F1 measure: print("Confusion Matrix:") printConfusionMatrix(CMsAll[bestF1Ind], ClassNames) return Params[bestF1Ind] def evaluateRegression(features, labels, nExp, MethodName, Params): ''' ARGUMENTS: features: numpy matrices of features [numOfSamples x numOfDimensions] labels: list of sample labels nExp: number of cross-validation experiments MethodName: "svm" or "randomforest" Params: list of classifier params to be evaluated RETURNS: bestParam: the value of the input parameter that optimizes the selected performance measure ''' # feature normalization: (featuresNorm, MEAN, STD) = normalizeFeatures([features]) featuresNorm = featuresNorm[0] nSamples = labels.shape[0] partTrain = 0.9 ErrorsAll = [] ErrorsTrainAll = [] ErrorsBaselineAll = [] for Ci, C in enumerate(Params): # for each param value Errors = [] ErrorsTrain = [] ErrorsBaseline = [] for e in range(nExp): # for each cross-validation iteration: # split features: randperm = numpy.random.permutation(list(range(nSamples))) nTrain = int(round(partTrain * nSamples)) featuresTrain = [featuresNorm[randperm[i]] for i in range(nTrain)] featuresTest = [featuresNorm[randperm[i+nTrain]] for i in range(nSamples - nTrain)] labelsTrain = [labels[randperm[i]] for i in range(nTrain)] labelsTest = [labels[randperm[i + nTrain]] for i in range(nSamples - nTrain)] # train multi-class svms: featuresTrain = numpy.matrix(featuresTrain) if MethodName == "svm": [Classifier, trainError] = trainSVMregression(featuresTrain, labelsTrain, C) # TODO #elif MethodName == "randomforest": # [Classifier, trainError] = trainRandomForestRegression(featuresTrain, labelsTrain, C) # TODO KNN # elif ClassifierName=="knn": # Classifier = trainKNN(featuresTrain, C) ErrorTest = [] ErrorTestBaseline = [] for itest, fTest in enumerate(featuresTest): R = regressionWrapper(Classifier, MethodName, fTest) Rbaseline = numpy.mean(labelsTrain) ErrorTest.append((R - labelsTest[itest]) * (R - labelsTest[itest])) ErrorTestBaseline.append((Rbaseline - labelsTest[itest]) * (Rbaseline - labelsTest[itest])) Error = numpy.array(ErrorTest).mean() ErrorBaseline = 
numpy.array(ErrorTestBaseline).mean() Errors.append(Error) ErrorsTrain.append(trainError) ErrorsBaseline.append(ErrorBaseline) ErrorsAll.append(numpy.array(Errors).mean()) ErrorsTrainAll.append(numpy.array(ErrorsTrain).mean()) ErrorsBaselineAll.append(numpy.array(ErrorsBaseline).mean()) bestInd = numpy.argmin(ErrorsAll) print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE", "T-MSE", "R-MSE")) for i in range(len(ErrorsAll)): print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(Params[i], ErrorsAll[i], ErrorsTrainAll[i], ErrorsBaselineAll[i]), end=' ') if i == bestInd: print("\t\t best", end=' ') print() return Params[bestInd] def printConfusionMatrix(CM, ClassNames): ''' This function prints a confusion matrix for a particular classification task. ARGUMENTS: CM: a 2-D numpy array of the confusion matrix (CM[i,j] is the number of times a sample from class i was classified in class j) ClassNames: a list that contains the names of the classes ''' if CM.shape[0] != len(ClassNames): print("printConfusionMatrix: Wrong argument sizes\n") return for c in ClassNames: if len(c) > 4: c = c[0:3] print("\t{0:s}".format(c), end=' ') print() for i, c in enumerate(ClassNames): if len(c) > 4: c = c[0:3] print("{0:s}".format(c), end=' ') for j in range(len(ClassNames)): print("\t{0:.1f}".format(100.0 * CM[i][j] / numpy.sum(CM)), end=' ') print() def normalizeFeatures(features): ''' This function normalizes a feature set to 0-mean and 1-std. Used in most classifier trainning cases. ARGUMENTS: - features: list of feature matrices (each one of them is a numpy matrix) RETURNS: - featuresNorm: list of NORMALIZED feature matrices - MEAN: mean vector - STD: std vector ''' X = numpy.array([]) for count, f in enumerate(features): if f.shape[0] > 0: if count == 0: X = f else: X = numpy.vstack((X, f)) count += 1 MEAN = numpy.mean(X, axis=0) STD = numpy.std(X, axis=0) featuresNorm = [] for f in features: ft = f.copy() for nSamples in range(f.shape[0]): ft[nSamples, :] = (ft[nSamples, :] - MEAN) / STD featuresNorm.append(ft) return (featuresNorm, MEAN, STD) def listOfFeatures2Matrix(features): ''' listOfFeatures2Matrix(features) This function takes a list of feature matrices as argument and returns a single concatenated feature matrix and the respective class labels. 
ARGUMENTS: - features: a list of feature matrices RETURNS: - X: a concatenated matrix of features - Y: a vector of class indeces ''' X = numpy.array([]) Y = numpy.array([]) for i, f in enumerate(features): if i == 0: X = f Y = i * numpy.ones((len(f), 1)) else: X = numpy.vstack((X, f)) Y = numpy.append(Y, i * numpy.ones((len(f), 1))) return (X, Y) def pcaDimRed(features, nDims): [X, Y] = listOfFeatures2Matrix(features) pca = sklearn.decomposition.PCA(n_components = nDims) pca.fit(X) coeff = pca.components_ coeff = coeff[:, 0:nDims] featuresNew = [] for f in features: ft = f.copy() # ft = pca.transform(ft, k=nDims) ft = numpy.dot(f, coeff) featuresNew.append(ft) return (featuresNew, coeff) def fileClassification(inputFile, modelName, modelType): # Load classifier: if not os.path.isfile(inputFile): print("fileClassification: wav file not found!") return (-1, -1, -1) [Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono x = audioBasicIO.stereo2mono(x) return fragmentClassification(Fs, x, modelName, modelType) def fragmentClassification(Fs, x, modelName, modelType): if not os.path.isfile(modelName): print("fileClassification: input modelName not found!") return (-1, -1, -1) if modelType == 'svm': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(modelName) elif modelType == 'knn': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(modelName) elif modelType == 'randomforest': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadRandomForestModel(modelName) elif modelType == 'gradientboosting': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadGradientBoostingModel(modelName) elif modelType == 'extratrees': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadExtraTreesModel(modelName) # feature extraction: [MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep)) MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics if computeBEAT: [beat, beatConf] = aF.beatExtraction(s, stStep) MidTermFeatures = numpy.append(MidTermFeatures, beat) MidTermFeatures = numpy.append(MidTermFeatures, beatConf) curFV = (MidTermFeatures - MEAN) / STD # normalization [Result, P] = classifierWrapper(Classifier, modelType, curFV) # classification return Result, P, classNames def fileRegression(inputFile, modelName, modelType): # Load classifier: if not os.path.isfile(inputFile): print("fileClassification: wav file not found!") return (-1, -1, -1) regressionModels = glob.glob(modelName + "_*") regressionModels2 = [] for r in regressionModels: if r[-5::] != "MEANS": regressionModels2.append(r) regressionModels = regressionModels2 regressionNames = [] for r in regressionModels: regressionNames.append(r[r.rfind("_")+1::]) # FEATURE EXTRACTION # LOAD ONLY THE FIRST MODEL (for mtWin, etc) if modelType == 'svm': [_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(regressionModels[0], True) elif modelType == 'knn': [_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(regressionModels[0], True) [Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono x = audioBasicIO.stereo2mono(x) # feature extraction: [MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep)) MidTermFeatures = MidTermFeatures.mean(axis=1) 
# long term averaging of mid-term statistics if computeBEAT: [beat, beatConf] = aF.beatExtraction(s, stStep) MidTermFeatures = numpy.append(MidTermFeatures, beat) MidTermFeatures = numpy.append(MidTermFeatures, beatConf) # REGRESSION R = [] for ir, r in enumerate(regressionModels): if not os.path.isfile(r): print("fileClassification: input modelName not found!") return (-1, -1, -1) if modelType == 'svm': [Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(r, True) elif modelType == 'knn': [Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(r, True) curFV = (MidTermFeatures - MEAN) / STD # normalization R.append(regressionWrapper(Model, modelType, curFV)) # classification return R, regressionNames def lda(data, labels, redDim): # Centre data data -= data.mean(axis=0) nData = numpy.shape(data)[0] nDim = numpy.shape(data)[1] print(nData, nDim) Sw = numpy.zeros((nDim, nDim)) Sb = numpy.zeros((nDim, nDim)) C = numpy.cov((data.T)) # Loop over classes classes = numpy.unique(labels) for i in range(len(classes)): # Find relevant datapoints indices = (numpy.where(labels == classes[i])) d = numpy.squeeze(data[indices, :]) classcov = numpy.cov((d.T)) Sw += float(numpy.shape(indices)[0])/nData * classcov Sb = C - Sw # Now solve for W # Compute eigenvalues, eigenvectors and sort into order #evals,evecs = linalg.eig(dot(linalg.pinv(Sw),sqrt(Sb))) evals, evecs = la.eig(Sw, Sb) indices = numpy.argsort(evals) indices = indices[::-1] evecs = evecs[:, indices] evals = evals[indices] w = evecs[:, :redDim] #print evals, w newData = numpy.dot(data, w) #for i in range(newData.shape[0]): # plt.text(newData[i,0],newData[i,1],str(labels[i])) #plt.xlim([newData[:,0].min(), newData[:,0].max()]) #plt.ylim([newData[:,1].min(), newData[:,1].max()]) #plt.show() return newData, w def writeTrainDataToARFF(modelName, features, classNames, featureNames): f = open(modelName + ".arff", 'w') f.write('@RELATION ' + modelName + '\n') for fn in featureNames: f.write('@ATTRIBUTE ' + fn + ' NUMERIC\n') f.write('@ATTRIBUTE class {') for c in range(len(classNames)-1): f.write(classNames[c] + ',') f.write(classNames[-1] + '}\n\n') f.write('@DATA\n') for c, fe in enumerate(features): for i in range(fe.shape[0]): for j in range(fe.shape[1]): f.write("{0:f},".format(fe[i, j])) f.write(classNames[c]+"\n") f.close() def trainSpeakerModelsScript(): ''' This script is used to train the speaker-related models (NOTE: data paths are hard-coded and NOT included in the library, the models are, however included) import audioTrainTest as aT aT.trainSpeakerModelsScript() ''' mtWin = 2.0 mtStep = 2.0 stWin = 0.020 stStep = 0.020 dirName = "DIARIZATION_ALL/all" listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))] featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerAll", computeBEAT=False, perTrain=0.50) dirName = "DIARIZATION_ALL/female_male" listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))] featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerFemaleMale", computeBEAT=False, perTrain=0.50) def main(argv): return 0 if __name__ == '__main__': main(sys.argv)
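# Usage sketch (not part of the module): train a classifier from one
# sub-directory of WAV files per class, then classify a new recording with the
# stored model.  The directory names, window sizes and file names below are
# placeholders; featureAndTrain and fileClassification are the functions
# defined above.
import audioTrainTest as aT

aT.featureAndTrain(["music/", "speech/"], 1.0, 1.0, 0.050, 0.050,
                   "svm", "svmMusicSpeech", computeBEAT=False)
Result, P, classNames = aT.fileClassification("unknown.wav",
                                              "svmMusicSpeech", "svm")
print(classNames[int(Result)])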
mit
mjvakili/ccppabc
ccppabc/code/archive/wp_covariance.py
1
1717
from halotools.empirical_models import Zheng07 , model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
                                                     infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np

model = Zheng07()
xir = []
for i in range(500):
    model.populate_mock()
    xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
np.savetxt("clustering_covariance_Mr20.dat" , covar)

"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a

rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.

cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)

pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
                           key1='x', key2='y', key3='z', mask=mask,
                           return_complement=False)

figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')

for n_pbins in np.array([2,8,16]):
    p_bins = np.linspace(0 , l/2 , n_pbins)
    a = time.time()
    clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
    print time.time() - a
    plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)

plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
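# A minimal sketch (not part of the original script) of how the covariance
# matrix saved above might be used downstream, e.g. in a Gaussian chi^2
# likelihood.  Only the file name comes from the script; the model/data
# vectors are placeholders that must match the number of wp bins.
import numpy as np

covar = np.loadtxt("clustering_covariance_Mr20.dat")
icov = np.linalg.inv(covar)                     # precision matrix

def chi2(model_wp, data_wp):
    """Chi^2 between a model and a measured clustering vector."""
    delta = np.asarray(model_wp) - np.asarray(data_wp)
    return float(np.dot(delta, np.dot(icov, delta)))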
mit
mblondel/scikit-learn
sklearn/utils/tests/test_utils.py
23
6045
import warnings import numpy as np import scipy.sparse as sp from scipy.linalg import pinv2 from sklearn.utils.testing import (assert_equal, assert_raises, assert_true, assert_almost_equal, assert_array_equal, SkipTest) from sklearn.utils import check_random_state from sklearn.utils import deprecated from sklearn.utils import resample from sklearn.utils import safe_mask from sklearn.utils import column_or_1d from sklearn.utils import safe_indexing from sklearn.utils import shuffle from sklearn.utils.extmath import pinvh from sklearn.utils.mocking import MockDataFrame def test_make_rng(): """Check the check_random_state utility function behavior""" assert_true(check_random_state(None) is np.random.mtrand._rand) assert_true(check_random_state(np.random) is np.random.mtrand._rand) rng_42 = np.random.RandomState(42) assert_true(check_random_state(42).randint(100) == rng_42.randint(100)) rng_42 = np.random.RandomState(42) assert_true(check_random_state(rng_42) is rng_42) rng_42 = np.random.RandomState(42) assert_true(check_random_state(43).randint(100) != rng_42.randint(100)) assert_raises(ValueError, check_random_state, "some invalid seed") def test_resample_noarg(): """Border case not worth mentioning in doctests""" assert_true(resample() is None) def test_deprecated(): """Test whether the deprecated decorator issues appropriate warnings""" # Copied almost verbatim from http://docs.python.org/library/warnings.html # First a function... with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") @deprecated() def ham(): return "spam" spam = ham() assert_equal(spam, "spam") # function must remain usable assert_equal(len(w), 1) assert_true(issubclass(w[0].category, DeprecationWarning)) assert_true("deprecated" in str(w[0].message).lower()) # ... then a class. 
with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") @deprecated("don't use this") class Ham(object): SPAM = 1 ham = Ham() assert_true(hasattr(ham, "SPAM")) assert_equal(len(w), 1) assert_true(issubclass(w[0].category, DeprecationWarning)) assert_true("deprecated" in str(w[0].message).lower()) def test_resample_value_errors(): """Check that invalid arguments yield ValueError""" assert_raises(ValueError, resample, [0], [0, 1]) assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3) assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42) def test_safe_mask(): random_state = check_random_state(0) X = random_state.rand(5, 4) X_csr = sp.csr_matrix(X) mask = [False, False, True, True, True] mask = safe_mask(X, mask) assert_equal(X[mask].shape[0], 3) mask = safe_mask(X_csr, mask) assert_equal(X_csr[mask].shape[0], 3) def test_pinvh_simple_real(): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64) a = np.dot(a, a.T) a_pinv = pinvh(a) assert_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_pinvh_nonpositive(): a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64) a = np.dot(a, a.T) u, s, vt = np.linalg.svd(a) s[0] *= -1 a = np.dot(u * s, vt) # a is now symmetric non-positive and singular a_pinv = pinv2(a) a_pinvh = pinvh(a) assert_almost_equal(a_pinv, a_pinvh) def test_pinvh_simple_complex(): a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]]) + 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]])) a = np.dot(a, a.conj().T) a_pinv = pinvh(a) assert_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_column_or_1d(): EXAMPLES = [ ("binary", ["spam", "egg", "spam"]), ("binary", [0, 1, 0, 1]), ("continuous", np.arange(10) / 20.), ("multiclass", [1, 2, 3]), ("multiclass", [0, 1, 2, 2, 0]), ("multiclass", [[1], [2], [3]]), ("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]), ("multiclass-multioutput", [[1, 2, 3]]), ("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]), ("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]), ("multiclass-multioutput", [[1, 2, 3]]), ("continuous-multioutput", np.arange(30).reshape((-1, 3))), ] for y_type, y in EXAMPLES: if y_type in ["binary", 'multiclass', "continuous"]: assert_array_equal(column_or_1d(y), np.ravel(y)) else: assert_raises(ValueError, column_or_1d, y) def test_safe_indexing(): X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] inds = np.array([1, 2]) X_inds = safe_indexing(X, inds) X_arrays = safe_indexing(np.array(X), inds) assert_array_equal(np.array(X_inds), X_arrays) assert_array_equal(np.array(X_inds), np.array(X)[inds]) def test_safe_indexing_pandas(): try: import pandas as pd except ImportError: raise SkipTest("Pandas not found") X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) X_df = pd.DataFrame(X) inds = np.array([1, 2]) X_df_indexed = safe_indexing(X_df, inds) X_indexed = safe_indexing(X_df, inds) assert_array_equal(np.array(X_df_indexed), X_indexed) def test_safe_indexing_mock_pandas(): X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) X_df = MockDataFrame(X) inds = np.array([1, 2]) X_df_indexed = safe_indexing(X_df, inds) X_indexed = safe_indexing(X_df, inds) assert_array_equal(np.array(X_df_indexed), X_indexed) def test_shuffle_on_ndim_equals_three(): def to_tuple(A): # to make the inner arrays hashable return tuple(tuple(tuple(C) for C in B) for B in A) A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2) S = set(to_tuple(A)) shuffle(A) # shouldn't raise a ValueError for dim = 3 assert_equal(set(to_tuple(A)), S)
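# A small extra test in the same style as the module above (not part of the
# original file): sklearn.utils.shuffle is expected to permute several arrays
# with one common permutation, so paired rows and labels stay aligned.  This
# sketch reuses the np / shuffle / assert_array_equal imports at the top of
# the file.
def test_shuffle_consistency():
    X = np.arange(10).reshape((5, 2))
    y = np.arange(5)
    X_shuffled, y_shuffled = shuffle(X, y, random_state=0)
    # each shuffled row must still correspond to its original label
    for row, label in zip(X_shuffled, y_shuffled):
        assert_array_equal(row, X[label])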
bsd-3-clause
timqian/sms-tools
lectures/8-Sound-transformations/plots-code/sineModelFreqScale-orchestra.py
21
2666
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT

(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4

mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)

freqScaling = np.array([0, .8, 1, 1.2])
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)

y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y, fs, 'sineModelFreqScale-orchestra.wav')

maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))

plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')

plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)

plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)

plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')

plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
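# Sketch (not part of the original script): the freqScaling array above is a
# sequence of (time, factor) pairs, so a constant factor of 2 at both end
# points should shift every sinusoidal track up by one octave.  It reuses the
# tfreq, tmag, Ns, H and fs objects computed earlier in this script.
octaveUp = np.array([0, 2.0, 1, 2.0])
ytfreqOct = SMT.sineFreqScaling(tfreq, octaveUp)
yOct = SM.sineModelSynth(ytfreqOct, tmag, np.array([]), Ns, H, fs)
UF.wavwrite(yOct, fs, 'sineModelOctaveUp-orchestra.wav')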
agpl-3.0
sonusz/PhasorToolBox
examples/freq_meter.py
1
1820
#!/usr/bin/env python3
"""
A real-time frequency meter for two PMUs.

This code connects to two PMUs, plots the frequency of the past 300
time-stamps and updates the plot in real time.
"""
from phasortoolbox import PDC, Client
import matplotlib.pyplot as plt
import numpy as np
import gc
import logging
logging.basicConfig(level=logging.DEBUG)


class FreqMeter(object):
    def __init__(self):
        x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
        y = [60.0]*300
        plt.ion()
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(211)
        self.line1, = self.ax1.plot(x, y)
        plt.title('PMU1 Frequency Plot')
        plt.xlabel('Time (s)')
        plt.ylabel('Freq (Hz)')
        self.ax2 = self.fig.add_subplot(212)
        self.line2, = self.ax2.plot(x, y)
        plt.title('PMU2 Frequency Plot')
        plt.xlabel('Time (s)')
        plt.ylabel('Freq (Hz)')
        plt.tight_layout()

    def update_plot(self, synchrophasors):
        y_data = [[], []]
        for synchrophasor in synchrophasors:
            for i, msg in enumerate(synchrophasor):
                y_data[i].append(msg.data.pmu_data[0].freq)
        self.line1.set_ydata(y_data[0])
        self.line2.set_ydata(y_data[1])
        self.ax1.set_ylim(min(y_data[0]), max(y_data[0]))
        self.ax2.set_ylim(min(y_data[1]), max(y_data[1]))
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
        del(synchrophasors)
        gc.collect()


if __name__ == '__main__':
    pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
    pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
    fm = FreqMeter()
    pdc = PDC(clients=[pmu_client1, pmu_client2], history=300)
    pdc.callback = fm.update_plot
    pdc.run()
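# Sketch of an alternative callback (not part of the original example): log
# the newest frequency of each PMU instead of plotting.  It only relies on the
# attributes already used above (msg.data.pmu_data[0].freq); the IP addresses
# and idcodes are placeholders.
from phasortoolbox import PDC, Client

def log_freq(synchrophasors):
    freqs = []
    for synchrophasor in synchrophasors:      # same iteration pattern as update_plot
        freqs = [msg.data.pmu_data[0].freq for msg in synchrophasor]
    print('latest PMU frequencies (Hz):', freqs)

if __name__ == '__main__':
    clients = [Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP'),
               Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')]
    pdc = PDC(clients=clients, history=300)
    pdc.callback = log_freq
    pdc.run()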
mit
sgenoud/scikit-learn
sklearn/cluster/tests/test_dbscan.py
3
2890
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from numpy.testing import assert_equal from scipy.spatial import distance from sklearn.cluster.dbscan_ import DBSCAN, dbscan from .common import generate_clustered_data n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): """Tests the DBSCAN algorithm with a similarity array.""" # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): """Tests the DBSCAN algorithm with a feature vector array.""" # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_callable(): """Tests the DBSCAN algorithm with a callable metric.""" # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__)
bsd-3-clause
theandygross/Figures
src/Figures/Boxplots.py
1
11851
""" Created on Apr 24, 2013 @author: agross """ import numpy as np import pandas as pd import matplotlib.pylab as plt import Stats.Scipy as Stats from Figures.FigureHelpers import latex_float, init_ax from Figures.FigureHelpers import prettify_ax from Helpers.Pandas import match_series, true_index colors = plt.rcParams['axes.color_cycle'] * 10 def _violin_plot(ax, data, pos=[], bp=False): """ http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html Create violin plots on an axis. Internal to module as it does not use Pandas data-structures. This is split off due to it's being a reuse of the code from the blog-post linked above, and I wanted to keep the original code untouched. """ from scipy.stats import gaussian_kde from numpy import arange # dist = max(pos)-min(pos) dist = len(pos) w = min(0.25 * max(dist, 1.0), 0.5) for p, d in enumerate(data): try: k = gaussian_kde(d) # calculates the kernel density m = k.dataset.min() # lower bound of violin M = k.dataset.max() # upper bound of violin x = arange(m, M, (M - m) / 100.) # support for violin v = k.evaluate(x) # violin profile (density curve) v = v / v.max() * w # scaling the violin to the available space ax.fill_betweenx(x, p, v + p, facecolor='y', alpha=0.1) ax.fill_betweenx(x, p, -v + p, facecolor='y', alpha=0.1) except: pass if bp: box_plot = ax.boxplot(data, notch=1, positions=range(len(pos)), vert=1, widths=.25) return box_plot def box_plot_pandas(bin_vec, real_vec, ax=None, order=None): """ Wrapper around matplotlib's boxplot function. Inputs bin_vec: Series of labels real_vec: Series of measurements to be grouped according to bin_vec """ _, ax = init_ax(ax) bin_vec, real_vec = match_series(bin_vec, real_vec) if order is not None: categories = order else: categories = bin_vec.value_counts().index data = [real_vec[bin_vec == num] for num in categories] bp = ax.boxplot(data, positions=range(len(categories)), widths=.3, patch_artist=True) if real_vec.name: ax.set_ylabel(real_vec.name) if bin_vec.name: ax.set_xlabel(bin_vec.name) ax.set_xticklabels(categories) [p.set_visible(False) for p in bp['fliers']] [p.set_visible(False) for p in bp['caps']] [p.set_visible(False) for p in bp['whiskers']] for p in bp['medians']: p.set_color(colors[0]) p.set_lw(3) p.set_alpha(.8) for i, p in enumerate(bp['boxes']): p.set_color('grey') p.set_lw(3) p.set_alpha(.7) if len(data[i]) < 3: p.set_alpha(0) def violin_plot_pandas(bin_vec, real_vec, ann='p', order=None, ax=None, filename=None): """ http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html Wrapper around matplotlib's boxplot function to add violin profile. 
Inputs bin_vec: Series of labels real_vec: Series of measurements to be grouped according to bin_vec """ fig, ax = init_ax(ax) ax.set_ylabel(real_vec.name) ax.set_xlabel(bin_vec.name) bin_vec, real_vec = match_series(bin_vec, real_vec) try: if order is None: categories = bin_vec.value_counts().index else: categories = order _violin_plot(ax, [real_vec[bin_vec == num] for num in categories], pos=categories, bp=True) ax.set_xticklabels([str(c) + '\n(n=%i)' % sum(bin_vec == c) for c in categories]) except: box_plot_pandas(bin_vec, real_vec, ax=ax) #if type(bin_vec.name) == str: # ax.set_title(str(bin_vec.name) + ' x ' + str(real_vec.name)) p_value = Stats.kruskal_pandas(bin_vec, real_vec)['p'] if ann == 'p_fancy': ax.annotate('$p = {}$'.format(latex_float(p_value)), (.95, -.02), xycoords='axes fraction', ha='right', va='bottom', size=14) if ann == 'p': ax.annotate('p = {0:.1e}'.format(p_value), (.95, .02), xycoords='axes fraction', ha='right', va='bottom', size=12) elif ann is not None: ax.annotate(ann, (.95, .02), xycoords='axes fraction', ha='right', va='bottom', size=12) if filename is not None: fig.savefig(filename) return def violin_plot_series(s, **kw_args): """ Wrapper for drawing a violin plot on a series with a multi-index. The second level of the index is used as the binning variable. """ assert s.index.levshape[1] > 1 violin_plot_pandas(pd.Series(s.index.get_level_values(1), s.index), s, **kw_args) def paired_boxplot_o(boxes): """ Wrapper around plt.boxplot to draw paired boxplots for a set of boxes. Input is the same as plt.boxplot: Array or a sequence of vectors. """ fig = plt.figure(figsize=(len(boxes) / 2.5, 4)) ax1 = fig.add_subplot(111) plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25) bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) + 1.5 * (np.arange(len(boxes)) / 2), patch_artist=True) [p.set_color(colors[0]) for p in bp['boxes'][::2]] [p.set_color('black') for p in bp['whiskers']] [p.set_color('black') for p in bp['fliers']] [p.set_alpha(.4) for p in bp['fliers']] [p.set_alpha(.6) for p in bp['boxes']] [p.set_edgecolor('black') for p in bp['boxes']] ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) # Hide these grid behind plot objects ax1.set_axisbelow(True) ax1.set_ylabel('$Log_{2}$ RNA Expression') ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5) return ax1, bp def paired_boxplot(boxes, ax1=None): if not ax1: fig = plt.figure(figsize=(len(boxes) / 2.5, 4)) ax1 = fig.add_subplot(111) plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25) bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) + 1.5 * (np.arange(len(boxes)) / 2), patch_artist=True) [p.set_color(colors[0]) for p in bp['boxes'][::2]] [p.set_color(colors[1]) for p in bp['boxes'][1::2]] [p.set_color('black') for p in bp['whiskers']] [p.set_color('black') for p in bp['fliers']] [p.set_alpha(.4) for p in bp['fliers']] [p.set_alpha(.8) for p in bp['boxes']] [p.set_edgecolor('black') for p in bp['boxes']] ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) # Hide these grid behind plot objects ax1.set_axisbelow(True) ax1.set_ylabel('$Log_{2}$ RNA Expression') ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5) return ax1, bp def paired_boxplot_tumor_normal(df, sig=True, cutoffs=[.01, .00001], order=None, ax=None): """ Draws a paired boxplot given a DataFrame with both tumor and normal samples on the index. '01' and '11' are hard-coded as the ids for tumor/normal. 
""" n = df.groupby(level=0).size() == 2 df = df.ix[n[n].index] if order is None: o = df.xs('11', level=1).median().order().index df = df[o[::-1]] else: df = df[order] l1 = list(df.xs('01', level=1).as_matrix().T) l2 = list(df.xs('11', level=1).as_matrix().T) boxes = [x for t in zip(l1, l2) for x in t] ax1, bp = paired_boxplot(boxes, ax) test = lambda v: Stats.ttest_rel(v.unstack()['01'], v.unstack()['11']) res = df.apply(test).T p = res.p if sig: pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p) if n < cutoffs[1]] if len(pts) > 0: s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200) else: s1 = None pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p) if (n < cutoffs[0]) and (n > cutoffs[1])] if len(pts) > 0: s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30) else: s2 = None ax1.legend(bp['boxes'][:2] + [s2, s1], ('Tumor', 'Normal', '$p<10^{-2}$', '$p<10^{-5}$'), loc='best', scatterpoints=1) else: ax1.legend(bp['boxes'][:2], ('Tumor', 'Normal'), loc='best') ax1.set_xticklabels(df.columns) def boxplot_panel(hit_vec, response_df): """ Draws a series of paired boxplots with the rows of the response_df split according to hit_vec. """ b = response_df.copy() b.columns = pd.MultiIndex.from_arrays([b.columns, hit_vec.ix[b.columns]]) b = b.T v1, v2 = hit_vec.unique() test = lambda v: Stats.anova(v.reset_index(level=1)[v.index.names[1]], v.reset_index(level=1)[v.name]) res = b.apply(test).T p = res.p.order() b = b.ix[:, p.index] l1 = list(b.xs(v1, level=1).as_matrix().T) l2 = list(b.xs(v2, level=1).as_matrix().T) boxes = [x for t in zip(l1, l2) for x in t] ax1, bp = paired_boxplot(boxes) y_lim = (response_df.T.quantile(.9).max()) * 1.2 pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if n < .00001] if len(pts) > 0: s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200) else: s1 = None pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if (n < .01) and (n > .00001)] if len(pts) > 0: s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30) else: s2 = None ax1.set_xticklabels(b.columns) ax1.legend(bp['boxes'][:2] + [s2, s1], (v1, v2, '$p<10^{-2}$', '$p<10^{-5}$'), loc='best', scatterpoints=1) def paired_bp_tn_split(vec, assignment, ax=None, split_vals=('01', '11'), data_type='gene expression'): """ Paired boxplot for a single Series, with splitting on the index, grouped by assignment. I.E. Tumor-Normal gene expression split by cancer. vec: vector of values to plot. assignment: vector mapping keys to group assignment ax (None): matplotlib axis to plot on or None split_vals ('01','11'): Values to split the boxplot pairing on. The default of ('01','11') indicates tumor vs. normal in the standard TCGA barcode nomenclature. This should coorespond to values on the second level of the index for vec and assignment. 
**both vec and assignment should have an overlapping index with multiple levels** """ _, ax = init_ax(ax, figsize=(8, 3)) if vec.name != None: label = vec.name # lose label in manipulation else: label = '' g1 = split_vals[0] g2 = split_vals[1] vec = pd.concat([vec[:, g1], vec[:, g2]], keys=[g1, g2], axis=1) vec = vec.dropna().stack() counts = vec.unstack().groupby(assignment).size() groups = list(true_index(counts > 5)) groups = vec.unstack().groupby(assignment).median()[g1].ix[groups] groups = groups.order().index[::-1] l1 = [np.array(vec[:, g1].ix[true_index(assignment == c)].dropna()) for c in groups] l2 = [np.array(vec[:, g2].ix[true_index(assignment == c)].dropna()) for c in groups] boxes = [x for t in zip(l1, l2) for x in t if len(t[1]) > 5] ax, bp = paired_boxplot(boxes, ax) labels = ['{}\n({})'.format(c, counts[c]) for c in groups] ax.set_xticklabels(labels) prettify_ax(ax) ax.set_ylabel('{} {}'.format(label, data_type))
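# Minimal usage sketch (not part of the module): draw a violin plot of a
# synthetic measurement split by a binary label.  The import path
# "Figures.Boxplots" is assumed from the repository layout; the other names
# below are placeholders.
import numpy as np
import pandas as pd
from Figures.Boxplots import violin_plot_pandas

labels = pd.Series(np.random.randint(0, 2, 200), name='mutation status')
values = pd.Series(np.random.randn(200) + 0.5 * labels, name='expression')
violin_plot_pandas(labels, values)   # annotates a Kruskal-Wallis p-value by default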
mit
duncanmmacleod/gwpy
gwpy/plot/axes.py
1
21895
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2018-2020) # # This file is part of GWpy. # # GWpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """Extension of `~matplotlib.axes.Axes` for gwpy """ import warnings from functools import wraps from math import log from numbers import Number import numpy from astropy.time import Time from matplotlib import rcParams from matplotlib.artist import allow_rasterization from matplotlib.axes import Axes as _Axes from matplotlib.axes._base import _process_plot_var_args from matplotlib.collections import PolyCollection from matplotlib.lines import Line2D from matplotlib.projections import register_projection from . import (Plot, colorbar as gcbar) from .colors import format_norm from .gps import GPS_SCALES from .legend import HandlerLine2D from ..time import to_gps __author__ = 'Duncan Macleod <duncan.macleod@ligo.org>' def log_norm(func): """Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring """ @wraps(func) def decorated_func(*args, **kwargs): norm, kwargs = format_norm(kwargs) kwargs['norm'] = norm return func(*args, **kwargs) return decorated_func def xlim_as_gps(func): """Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps` """ @wraps(func) def wrapped_func(self, left=None, right=None, **kw): if right is None and numpy.iterable(left): left, right = left kw['left'] = left kw['right'] = right gpsscale = self.get_xscale() in GPS_SCALES for key in ('left', 'right'): if gpsscale: try: kw[key] = numpy.longdouble(str(to_gps(kw[key]))) except TypeError: pass return func(self, **kw) return wrapped_func def restore_grid(func): """Wrap ``func`` to preserve the Axes current grid settings. 
""" @wraps(func) def wrapped_func(self, *args, **kwargs): try: grid = ( self.xaxis._minor_tick_kw["gridOn"], self.xaxis._major_tick_kw["gridOn"], self.yaxis._minor_tick_kw["gridOn"], self.yaxis._major_tick_kw["gridOn"], ) except KeyError: # matplotlib < 3.3.3 grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor, self.yaxis._gridOnMinor, self.yaxis._gridOnMajor) try: return func(self, *args, **kwargs) finally: # reset grid self.xaxis.grid(grid[0], which="minor") self.xaxis.grid(grid[1], which="major") self.yaxis.grid(grid[2], which="minor") self.yaxis.grid(grid[3], which="major") return wrapped_func # -- new Axes ----------------------------------------------------------------- class Axes(_Axes): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # handle Series in `ax.plot()` self._get_lines = PlotArgsProcessor(self) # reset data formatters (for interactive plots) to support # GPS time display self.fmt_xdata = self._fmt_xdata self.fmt_ydata = self._fmt_ydata @allow_rasterization def draw(self, *args, **kwargs): labels = {} for ax in (self.xaxis, self.yaxis): if ax.get_scale() in GPS_SCALES and ax.isDefault_label: labels[ax] = ax.get_label_text() trans = ax.get_transform() epoch = float(trans.get_epoch()) unit = trans.get_unit_name() iso = Time(epoch, format='gps', scale='utc').iso utc = iso.rstrip('0').rstrip('.') ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format( unit, utc, epoch)) try: super().draw(*args, **kwargs) finally: for ax in labels: # reset labels ax.isDefault_label = True # -- auto-gps helpers ----------------------- def _fmt_xdata(self, x): if self.get_xscale() in GPS_SCALES: return str(to_gps(x)) return self.xaxis.get_major_formatter().format_data_short(x) def _fmt_ydata(self, y): if self.get_yscale() in GPS_SCALES: return str(to_gps(y)) return self.yaxis.get_major_formatter().format_data_short(y) set_xlim = xlim_as_gps(_Axes.set_xlim) def set_epoch(self, epoch): """Set the epoch for the current GPS scale. This method will fail if the current X-axis scale isn't one of the GPS scales. See :ref:`gwpy-plot-gps` for more details. Parameters ---------- epoch : `float`, `str` GPS-compatible time or date object, anything parseable by :func:`~gwpy.time.to_gps` is fine. """ scale = self.get_xscale() return self.set_xscale(scale, epoch=epoch) def get_epoch(self): """Return the epoch for the current GPS scale/ This method will fail if the current X-axis scale isn't one of the GPS scales. See :ref:`gwpy-plot-gps` for more details. """ return self.get_xaxis().get_transform().get_epoch() # -- overloaded plotting methods ------------ def scatter(self, x, y, c=None, **kwargs): # scatter with auto-sorting by colour try: if c is None: raise ValueError c_array = numpy.asanyarray(c, dtype=float) except ValueError: # no colour array pass else: c_sort = kwargs.pop('c_sort', True) if c_sort: sortidx = c_array.argsort() x = numpy.asarray(x)[sortidx] y = numpy.asarray(y)[sortidx] c = numpy.asarray(c)[sortidx] return super().scatter(x, y, c=c, **kwargs) scatter.__doc__ = _Axes.scatter.__doc__.replace( 'marker :', 'c_sort : `bool`, optional, default: True\n' ' Sort scatter points by `c` array value, if given.\n\n' 'marker :', ) @log_norm def imshow(self, array, *args, **kwargs): """Display an image, i.e. data on a 2D regular raster. If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a :class:`~gwpy.spectrogram.Spectrogram`), then the defaults are _different_ to those in the upstream :meth:`~matplotlib.axes.Axes.imshow` method. 
Namely, the defaults are - ``origin='lower'`` (coordinates start in lower-left corner) - ``aspect='auto'`` (pixels are not forced to be square) - ``interpolation='none'`` (no image interpolation is used) In all other usage, the defaults from the upstream matplotlib method are unchanged. Parameters ---------- array : array-like or PIL image The image data. *args, **kwargs All arguments and keywords are passed to the inherited :meth:`~matplotlib.axes.Axes.imshow` method. See also -------- matplotlib.axes.Axes.imshow for details of the image rendering """ if hasattr(array, "yspan"): # Array2D return self._imshow_array2d(array, *args, **kwargs) image = super().imshow(array, *args, **kwargs) self.autoscale(enable=None, axis='both', tight=None) return image def _imshow_array2d(self, array, origin='lower', interpolation='none', aspect='auto', **kwargs): """Render an `~gwpy.types.Array2D` using `Axes.imshow` """ # NOTE: If you change the defaults for this method, please update # the docstring for `imshow` above. # calculate extent extent = tuple(array.xspan) + tuple(array.yspan) if self.get_xscale() == 'log' and extent[0] == 0.: extent = (1e-300,) + extent[1:] if self.get_yscale() == 'log' and extent[2] == 0.: extent = extent[:2] + (1e-300,) + extent[3:] kwargs.setdefault('extent', extent) return self.imshow(array.value.T, origin=origin, aspect=aspect, interpolation=interpolation, **kwargs) @restore_grid @log_norm def pcolormesh(self, *args, **kwargs): """Create a pseudocolor plot with a non-regular rectangular grid. When using GWpy, this method can be called with a single argument that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y`` coordinate arrays will be determined from the indexing. In all other usage, all ``args`` and ``kwargs`` are passed directly to :meth:`~matplotlib.axes.Axes.pcolormesh`. Notes ----- Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`, this method respects the current grid settings. 
See also -------- matplotlib.axes.Axes.pcolormesh """ if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D return self._pcolormesh_array2d(*args, **kwargs) return super().pcolormesh(*args, **kwargs) def _pcolormesh_array2d(self, array, *args, **kwargs): """Render an `~gwpy.types.Array2D` using `Axes.pcolormesh` """ x = numpy.concatenate((array.xindex.value, array.xspan[-1:])) y = numpy.concatenate((array.yindex.value, array.yspan[-1:])) xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True) return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs) def hist(self, x, *args, **kwargs): x = numpy.asarray(x) # re-format weights as array if given as float weights = kwargs.get('weights', None) if isinstance(weights, Number): kwargs['weights'] = numpy.ones_like(x) * weights # calculate log-spaced bins on-the-fly if (kwargs.pop('logbins', False) and not numpy.iterable(kwargs.get('bins', None))): nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30) # get range hrange = kwargs.pop('range', None) if hrange is None: try: hrange = numpy.min(x), numpy.max(x) except ValueError as exc: if str(exc).startswith('zero-size array'): # no data exc.args = ('cannot generate log-spaced histogram ' 'bins for zero-size array, ' 'please pass `bins` or `range` manually',) raise # log-scale the axis and extract the base if kwargs.get('orientation') == 'horizontal': self.set_yscale('log', nonposy='clip') logbase = self.yaxis._scale.base else: self.set_xscale('log', nonposx='clip') logbase = self.xaxis._scale.base # generate the bins kwargs['bins'] = numpy.logspace( log(hrange[0], logbase), log(hrange[1], logbase), nbins+1, endpoint=True) return super().hist(x, *args, **kwargs) hist.__doc__ = _Axes.hist.__doc__.replace( 'color :', 'logbins : boolean, optional\n' ' If ``True``, use logarithmically-spaced histogram bins.\n\n' ' Default is ``False``\n\n' 'color :') # -- new plotting methods ------------------- def plot_mmm(self, data, lower=None, upper=None, **kwargs): """Plot a `Series` as a line, with a shaded region around it. The ``data`` `Series` is drawn, while the ``lower`` and ``upper`` `Series` are plotted lightly below and above, with a fill between them and the ``data``. All three `Series` should have the same `~Series.index` array. Parameters ---------- data : `~gwpy.types.Series` Data to plot normally. lower : `~gwpy.types.Series` Lower boundary (on Y-axis) for shade. upper : `~gwpy.types.Series` Upper boundary (on Y-axis) for shade. **kwargs Any other keyword arguments acceptable for :meth:`~matplotlib.Axes.plot`. 
Returns ------- artists : `tuple` All of the drawn artists: - `~matplotlib.lines.Line2d` for ``data``, - `~matplotlib.lines.Line2D` for ``lower``, if given - `~matplotlib.lines.Line2D` for ``upper``, if given - `~matplitlib.collections.PolyCollection` for shading See also -------- matplotlib.axes.Axes.plot for a full description of acceptable ``*args`` and ``**kwargs`` """ alpha = kwargs.pop('alpha', .1) # plot mean line, = self.plot(data, **kwargs) out = [line] # modify keywords for shading kwargs.update({ 'label': '', 'linewidth': line.get_linewidth() / 2, 'color': line.get_color(), 'alpha': alpha * 2, }) # plot lower and upper Series fill = [data.xindex.value, data.value, data.value] for i, bound in enumerate((lower, upper)): if bound is not None: out.extend(self.plot(bound, **kwargs)) fill[i+1] = bound.value # fill between out.append(self.fill_between( *fill, alpha=alpha, color=kwargs['color'], rasterized=kwargs.get('rasterized', True))) return out def tile(self, x, y, w, h, color=None, anchor='center', edgecolors='face', linewidth=0.8, **kwargs): """Plot rectanguler tiles based onto these `Axes`. ``x`` and ``y`` give the anchor point for each tile, with ``w`` and ``h`` giving the extent in the X and Y axis respectively. Parameters ---------- x, y, w, h : `array_like`, shape (n, ) Input data color : `array_like`, shape (n, ) Array of amplitudes for tile color anchor : `str`, optional Anchor point for tiles relative to ``(x, y)`` coordinates, one of - ``'center'`` - center tile on ``(x, y)`` - ``'ll'`` - ``(x, y)`` defines lower-left corner of tile - ``'lr'`` - ``(x, y)`` defines lower-right corner of tile - ``'ul'`` - ``(x, y)`` defines upper-left corner of tile - ``'ur'`` - ``(x, y)`` defines upper-right corner of tile **kwargs Other keywords are passed to :meth:`~matplotlib.collections.PolyCollection` Returns ------- collection : `~matplotlib.collections.PolyCollection` the collection of tiles drawn Examples -------- >>> import numpy >>> from matplotlib import pyplot >>> import gwpy.plot # to get gwpy's Axes >>> x = numpy.arange(10) >>> y = numpy.arange(x.size) >>> w = numpy.ones_like(x) * .8 >>> h = numpy.ones_like(x) * .8 >>> fig = pyplot.figure() >>> ax = fig.gca() >>> ax.tile(x, y, w, h, anchor='ll') >>> pyplot.show() """ # get color and sort if color is not None and kwargs.get('c_sort', True): sortidx = color.argsort() x = x[sortidx] y = y[sortidx] w = w[sortidx] h = h[sortidx] color = color[sortidx] # define how to make a polygon for each tile if anchor == 'll': def _poly(x, y, w, h): return ((x, y), (x, y+h), (x+w, y+h), (x+w, y)) elif anchor == 'lr': def _poly(x, y, w, h): return ((x-w, y), (x-w, y+h), (x, y+h), (x, y)) elif anchor == 'ul': def _poly(x, y, w, h): return ((x, y-h), (x, y), (x+w, y), (x+w, y-h)) elif anchor == 'ur': def _poly(x, y, w, h): return ((x-w, y-h), (x-w, y), (x, y), (x, y-h)) elif anchor == 'center': def _poly(x, y, w, h): return ((x-w/2., y-h/2.), (x-w/2., y+h/2.), (x+w/2., y+h/2.), (x+w/2., y-h/2.)) else: raise ValueError("Unrecognised tile anchor {!r}".format(anchor)) # build collection cmap = kwargs.pop('cmap', rcParams['image.cmap']) coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)), edgecolors=edgecolors, linewidth=linewidth, **kwargs) if color is not None: coll.set_array(color) coll.set_cmap(cmap) out = self.add_collection(coll) self.autoscale_view() return out # -- overloaded auxiliary methods ----------- def legend(self, *args, **kwargs): # handle deprecated keywords linewidth = kwargs.pop("linewidth", None) if linewidth: 
warnings.warn( "the linewidth keyword to gwpy.plot.Axes.legend has been " "deprecated and will be removed in a future release; " "please update your code to use a custom legend handler, " "e.g. gwpy.plot.legend.HandlerLine2D.", DeprecationWarning, ) alpha = kwargs.pop("alpha", None) if alpha: kwargs.setdefault("framealpha", alpha) warnings.warn( "the alpha keyword to gwpy.plot.Axes.legend has been " "deprecated and will be removed in a future release; " "use framealpha instead.", DeprecationWarning, ) # build custom handler handler_map = kwargs.setdefault("handler_map", dict()) if isinstance(handler_map, dict): handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6)) # create legend return super().legend(*args, **kwargs) legend.__doc__ = _Axes.legend.__doc__.replace( "Call signatures", """.. note:: This method uses a custom default legend handler for `~matplotlib.lines.Line2D` objects, with increased linewidth relative to the upstream :meth:`~matplotlib.axes.Axes.legend` method. To disable this, pass ``handler_map=None``, or create and pass your own handler class. See :ref:`gwpy-plot-legend` for more details. Call signatures""", ) def colorbar(self, mappable=None, **kwargs): """Add a `~matplotlib.colorbar.Colorbar` to these `Axes` Parameters ---------- mappable : matplotlib data collection, optional collection against which to map the colouring, default will be the last added mappable artist (collection or image) fraction : `float`, optional fraction of space to steal from these `Axes` to make space for the new axes, default is ``0.`` if ``use_axesgrid=True`` is given (default), otherwise default is ``.15`` to match the upstream matplotlib default. **kwargs other keyword arguments to be passed to the :meth:`Plot.colorbar` generator Returns ------- cbar : `~matplotlib.colorbar.Colorbar` the newly added `Colorbar` See also -------- Plot.colorbar """ fig = self.get_figure() if kwargs.get('use_axesgrid', True): kwargs.setdefault('fraction', 0.) if kwargs.get('fraction', 0.) == 0.: kwargs.setdefault('use_axesgrid', True) mappable, kwargs = gcbar.process_colorbar_kwargs( fig, mappable=mappable, ax=self, **kwargs) if isinstance(fig, Plot): # either we have created colorbar Axes using axesgrid1, or # the user already gave use_axesgrid=False, so we forcefully # disable axesgrid here in case fraction == 0., which causes # gridspec colorbars to fail. kwargs['use_axesgrid'] = False return fig.colorbar(mappable, **kwargs) # override default Axes with this one by registering a projection with the # same name register_projection(Axes) # -- overload Axes.plot() to handle Series ------------------------------------ class PlotArgsProcessor(_process_plot_var_args): """This class controls how ax.plot() works """ def __call__(self, *args, **kwargs): """Find `Series` data in `plot()` args and unwrap """ newargs = [] while args: # strip first argument this, args = args[:1], args[1:] # it its a 1-D Series, then parse it as (xindex, value) if hasattr(this[0], "xindex") and this[0].ndim == 1: this = (this[0].xindex.value, this[0].value) # otherwise treat as normal (must be a second argument) else: this += args[:1] args = args[1:] # allow colour specs if args and isinstance(args[0], str): this += args[0], args = args[1:] newargs.extend(this) return super().__call__(*newargs, **kwargs)
gpl-3.0
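The hist override above builds logarithmically spaced bins on the fly; a minimal standalone sketch of the same idea with plain NumPy and Matplotlib (the sample data and bin count are illustrative assumptions, not gwpy defaults):

import numpy
from matplotlib import pyplot

data = numpy.random.lognormal(mean=0.0, sigma=1.0, size=1000)  # hypothetical positive-valued sample
nbins = 30
low, high = data.min(), data.max()
# log-spaced bin edges between the data extremes (base 10 here for simplicity)
bins = numpy.logspace(numpy.log10(low), numpy.log10(high), nbins + 1, endpoint=True)

fig, ax = pyplot.subplots()
ax.hist(data, bins=bins)
ax.set_xscale('log')
pyplot.show()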
openfisca/openfisca-france-indirect-taxation
openfisca_france_indirect_taxation/examples/transports/plot_legislation/plot_ticpe_taux_implicite.py
4
2264
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:06:45 2015

@author: thomas.douenne

TICPE: Taxe intérieure sur la consommation des produits énergétiques
(the French domestic tax on the consumption of energy products)
"""

# The purpose of this script is to plot the evolution of the implicit TICPE rate since 1993.
# The rate is studied for diesel and for unleaded fuels.

# Import general-purpose modules
from pandas import concat

# Import OpenFisca-specific modules
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_bar_list
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_accises import get_accises_carburants
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_tva import get_tva_taux_plein
from openfisca_france_indirect_taxation.examples.dataframes_from_legislation.get_prix_carburants import \
    get_prix_carburants

# Fetch the legislation parameters and the prices
ticpe = ['ticpe_gazole', 'ticpe_super9598']
accise_diesel = get_accises_carburants(ticpe)
prix_ttc = ['diesel_ttc', 'super_95_ttc']
prix_carburants = get_prix_carburants(prix_ttc)
tva_taux_plein = get_tva_taux_plein()

# Build a dataframe containing these parameters
df_taux_implicite = concat([accise_diesel, prix_carburants, tva_taux_plein], axis = 1)
df_taux_implicite.rename(columns = {'value': 'taux plein tva'}, inplace = True)

# From these parameters, compute the implicit tax rates
df_taux_implicite['taux_implicite_diesel'] = (
    df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva']) /
    (df_taux_implicite['prix diesel ttc'] -
    (df_taux_implicite['accise ticpe gazole'] * (1 + df_taux_implicite['taux plein tva'])))
    )
df_taux_implicite['taux_implicite_sp95'] = (
    df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva']) /
    (df_taux_implicite['prix super 95 ttc'] -
    (df_taux_implicite['accise ticpe super9598'] * (1 + df_taux_implicite['taux plein tva'])))
    )

df_taux_implicite = df_taux_implicite.dropna()

# Draw the charts
graph_builder_bar_list(df_taux_implicite['taux_implicite_diesel'], 1, 1)
graph_builder_bar_list(df_taux_implicite['taux_implicite_sp95'], 1, 1)
agpl-3.0
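The implicit-rate formula used in the script above divides the excise (grossed up by VAT) by the pre-tax remainder of the pump price; a tiny numeric check with illustrative values that are assumptions, not actual legislation parameters:

# Hypothetical values (EUR per litre and VAT rate); not real TICPE/TVA parameters.
accise = 0.43        # excise duty per litre
tva = 0.20           # full VAT rate
prix_ttc = 1.40      # pump price, all taxes included

taxe_totale = accise * (1 + tva)                        # excise plus the VAT levied on it
taux_implicite = taxe_totale / (prix_ttc - taxe_totale)
print(round(taux_implicite, 3))  # ~0.584 with these made-up numbers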
hiuwo/acq4
acq4/analysis/tools/Fitting.py
1
36006
#!/usr/bin/env python """ Python class wrapper for data fitting. Includes the following external methods: getFunctions returns the list of function names (dictionary keys) FitRegion performs the fitting Note that FitRegion will plot on top of the current data using MPlots routines if the current curve and the current plot instance are passed. """ # January, 2009 # Paul B. Manis, Ph.D. # UNC Chapel Hill # Department of Otolaryngology/Head and Neck Surgery # Supported by NIH Grants DC000425-22 and DC004551-07 to PBM. # Copyright Paul Manis, 2009 # """ This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ """ Additional Terms: The author(s) would appreciate that any modifications to this program, or corrections of erros, be reported to the principal author, Paul Manis, at pmanis@med.unc.edu, with the subject line "PySounds Modifications". Note: This program also relies on the TrollTech Qt libraries for the GUI. You must obtain these libraries from TrollTech directly, under their license to use the program. """ import sys import numpy import scipy try: import openopt HAVE_OPENOPT = True except ImportError: HAVE_OPENOPT = False print "There was an error importing openopt. Continuing...." import ctypes import numpy.random #from numba import autojit usingMPlot = False if usingMPlot: import MPlot # we include plotting as part of the fitting def debug_trace(): '''Set a tracepoint in the Python debugger that works with Qt''' if pyqt: from PyQt4.QtCore import pyqtRemoveInputHook from pdb import set_trace if pyqt: pyqtRemoveInputHook() set_trace() class Fitting(): # dictionary contains: # name of function: function call, initial parameters, iterations, plot color, then x and y for testing # target valutes, names of parameters, contant values, and derivative function if needed. 
# def __init__(self): self.fitfuncmap = { 'exp0' : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.], [1.0, 5.0], ['A0', 'tau'], None, None), 'exp1' : (self.expeval, [0.0, 0.0, 20.0], 2000, 'k', [0, 100, 1.], [0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, self.expevalprime), 'expsum' : (self.expsumeval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.], [0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None), 'expsum2' : (self.expsumeval2, [0., -0.5, -0.250], 50000, 'k', [0, 1000, 1.], [0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None), 'exp2' : (self.exp2eval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.], [0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None), 'exppow' : (self.exppoweval, [0.0, 1.0, 100, ], 2000, 'k', [0, 100, 0.1], [0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None), 'exppulse' : (self.expPulse, [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k', [0, 10, 0.3], [0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None), 'boltz' : (self.boltzeval, [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.], [0.00, 0.010, -100.0, 7.0], ['DC', 'A0', 'x0', 'k'], None, None), 'gauss' : (self.gausseval, [1.0, 0.0, 0.5], 2000, 'y', [-10., 10., 0.2], [1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None), 'line' : (self.lineeval, [1.0, 0.0], 500, 'r', [-10., 10., 0.5], [0.0, 2.0], ['m', 'b'], None, None), 'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.], [0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None), 'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.], [0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None), 'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.], [0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None), 'sin' : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2], [0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None), 'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r', [-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0], ['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None), 'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r', [-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0], ['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder), } self.fitSum2Err = 0 def getFunctions(self): return(self.fitfuncmap.keys()) def exp0eval(self, p, x, y=None, C = None, sumsq = False): """ Exponential function with an amplitude and 0 offset """ yd = p[0] * numpy.exp(-x/p[1]) if y is None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None): """ Sum of two exponentials with independent time constants and amplitudes, and a DC offset """ yd = p[0] + (p[1]* numpy.exp(-x/p[2])) + (p[3]*numpy.exp(-x/p[4])) if y is None: return yd else: yerr = y - yd if weights is not None: yerr = yerr * weights if sumsq is True: return numpy.sum(yerr**2) else: return yerr def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None): """ Sum of two exponentials, with predefined time constants , allowing only the amplitudes and DC offset to vary """ yd = p[0] + (p[1]* numpy.exp(-x/C[0])) + (p[2]*numpy.exp(-x/C[1])) if y is None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def expeval(self, p, x, y=None, C = None, sumsq = False, weights=None): """ Exponential with offset """ yd = p[0] 
+ p[1] * numpy.exp(-x/p[2]) # print yd.shape # print y.shape if y is None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None): """ Derivative for exponential with offset """ ydp = p[1] * numpy.exp(-x/p[2])/(p[2]*p[2]) yd = p[0] + p[1] * numpy.exp(-x/p[2]) print y if y is None: return (yd, ydp) else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None): """ Single exponential function, rising to a ppower """ if C is None: cx = 1.0 else: cx = C[0] yd = p[0] + p[1] * (1.0-numpy.exp(-x/p[2]))**cx if y is None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None): """ For fit to activation currents... """ yd = p[0] + (p[1] * (1.0 - numpy.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - numpy.exp(-x/p[4]))) if y == None: return yd else: if sumsq is True: ss = numpy.sqrt(numpy.sum((y - yd)**2.0)) # if p[4] < 3.0*p[2]: # ss = ss*1e6 # penalize them being too close return ss else: return y - yd # @autojit def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None): """Exponential pulse function (rising exponential with optional variable-length plateau followed by falling exponential) Parameter p is [yOffset, t0, tau1, tau2, amp, width] """ yOffset, t0, tau1, tau2, amp, width = p yd = numpy.empty(x.shape) yd[x<t0] = yOffset m1 = (x>=t0)&(x<(t0+width)) m2 = (x>=(t0+width)) x1 = x[m1] x2 = x[m2] yd[m1] = amp*(1-numpy.exp(-(x1-t0)/tau1))+yOffset amp2 = amp*(1-numpy.exp(-width/tau1)) ## y-value at start of decay yd[m2] = ((amp2)*numpy.exp(-(x2-(width+t0))/tau2))+yOffset if y == None: return yd else: if sumsq is True: ss = numpy.sqrt(numpy.sum((y-yd)**2.0)) return ss else: return y-yd def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0] + (p[1]-p[0])/(1.0 + numpy.exp((x-p[2])/p[3])) if y == None: return yd else: if sumsq is True: return numpy.sqrt(numpy.sum((y - yd)**2.0)) else: return y - yd def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0] + p[1]/(1 + numpy.exp((x-p[2])/p[3])) + p[4]/(1 + numpy.exp((x-p[5])/p[6])) if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None): yd = (p[0]/(p[2]*numpy.sqrt(2.0*numpy.pi)))*numpy.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0))) if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0]*x + p[1] if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0]*x**2.0 + p[1]*x + p[2] if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3] if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None): yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4] if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def sineeval(self, p, x, y=None, C = None, 
sumsq = False, weights=None): yd = p[0] + p[1]*numpy.sin((x*2.0*numpy.pi/p[2])+p[3]) if y == None: return yd else: if sumsq is True: return numpy.sum((y - yd)**2) else: return y - yd def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None): """ HH-like description of activation/inactivation function 'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2' """ yd = p[0] + 1.0/(p[1]*numpy.exp((x+p[2])/p[3]) +p[4]*numpy.exp(-(x+p[5])/p[6])) if y == None: return yd else: if sumsq is True: return numpy.sqrt(numpy.sum((y - yd)**2)) else: return y - yd def taucurveder(self, p, x): """ Derivative for taucurve 'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2' """ y = -(p[1]*numpy.exp((p[2] + x)/p[3])/p[3] - p[4]*numpy.exp(-(p[5] + x)/p[6])/p[6])/(p[1]*numpy.exp((p[2] + x)/p[3]) + p[4]*numpy.exp(-(p[5] + x)/p[6]))**2.0 # print 'dy: ', y return y def getClipData(self, x, y, t0, t1): """ Return the values in y that match the x range in tx from t0 to t1. x must be monotonic increasing or decreasing. Allow for reverse ordering. """ it0 = (numpy.abs(x-t0)).argmin() it1 = (numpy.abs(x-t1)).argmin() if it0 > it1: t = it1 it1 = it0 it0 = t return(x[it0:it1], y[it0:it1]) def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0 = None, t1 = None, fitFunc = 'exp1', fitFuncDer = None, fitPars = None, fixedPars = None, fitPlot = None, plotInstance = None, dataType= 'xy', method = None, bounds=None, weights=None, constraints=()): """ **Arguments** ============= =================================================== whichdata thisaxis tdat ydat t0 (optional) Minimum of time data - determined from tdat if left unspecified t1 (optional) Maximum of time data - determined from tdat if left unspecified fitFunc (optional) The function to fit the data to (as defined in __init__). Default is 'exp1'. fitFuncDer (optional) default=None fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified. fixedPars (optional) Fixed parameters to pass to the function. Default=None fitPlot (optional) default=None plotInstance (optional) default=None dataType (optional) Options are ['xy', 'blocks']. Default='xy' method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B', 'openopt']. 
Default='leastsq' bounds (optional) default=None weights (optional) default=None constraints (optional) default=() ============= =================================================== To call with tdat and ydat as simple arrays: FitRegion(1, 0, tdat, ydat, FitFunc = 'exp1') e.g., the first argument should be 1, but this axis is ignored if datatype is 'xy' """ self.fitSum2Err = 0.0 if t0 == t1: if plotInstance is not None and usingMPlot: (x, y) = plotInstance.getCoordinates() t0 = x[0] t1 = x[1] if t1 is None: t1 = numpy.max(tdat) if t0 is None: t0 = numpy.min(tdat) func = self.fitfuncmap[fitFunc] if func is None: print "FitRegion: unknown function %s" % (fitFunc) return xp = [] xf = [] yf = [] yn = [] tx = [] names = func[6] if fitPars is None: fpars = func[1] else: fpars = fitPars if method == 'simplex': # remap calls if needed for newer versions of scipy (>= 0.11) method = 'Nelder-Mead' if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d': # check if 1-d, then "pretend" its only a 1-element block nblock = 1 else: nblock = ydat.shape[0] # otherwise, this is the number of traces in the block # print 'datatype: ', dataType # print 'nblock: ', nblock # print 'whichdata: ', whichdata # for block in range(nblock): for record in whichdata: if dataType == 'blocks': (tx, dy) = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1) else: (tx, dy) = self.getClipData(tdat, ydat[record,:], t0, t1) # print 'Fitting.py: block, type, Fit data: ', block, dataType # print tx.shape # print dy.shape yn.append(names) if not any(tx): continue # no data in the window... ier = 0 # # Different optimization methods are included here. Not all have been tested fully with # this wrapper. # if method is None or method == 'leastsq': # use standard leastsq, no bounds plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars), full_output = 1, maxfev = func[2]) if ier > 4: print "optimize.leastsq error flag is: %d" % (ier) print mesg elif method == 'curve_fit': print fpars print fixedPars plsq, cov = scipy.optimize.curve_fit(func[0], tx.astype('float64'), dy.astype('float64'), p0=fpars) ier = 0 elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']: # use standard wrapper from scipy for those routintes res = scipy.optimize.minimize(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True), method=method, jac=None, hess=None, hessp=None, bounds=bounds, constraints=constraints, tol=None, callback=None, options={'maxiter': func[2], 'disp': False }) plsq = res.x #print " method:", method #print " bounds:", bounds #print " result:", plsq # next section is replaced by the code above - kept here for reference if needed... 
# elif method == 'fmin' or method == 'simplex': # plsq = scipy.optimize.fmin(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True), # maxfun = func[2]) # , iprint=0) # ier = 0 # elif method == 'bfgs': # plsq, cov, infodict = scipy.optimize.fmin_l_bfgs_b(func[0], fpars, fprime=func[8], # args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True, weights), # maxfun = func[2], bounds = bounds, # approx_grad = True) # , disp=0, iprint=-1) elif method == 'openopt': # use OpenOpt's routines - usually slower, but sometimes they converge better if not HAVE_OPENOPT: raise Exception("Requested openopt fitting method but openopt is not installed.") if bounds is not None: # unpack bounds lb = [y[0] for y in bounds] ub = [y[1] for y in bounds] fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer, lb=lb, ub=ub) # fopt.df = func[8] r = fopt.solve('nlp:ralg', plot=0, iprint = 10) plsq = r.xf ier = 0 else: fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer) print func[8] # fopt.df = func[7] fopt.checkdf() r = fopt.solve('nlp:ralg', plot=0, iprint = 10) plsq = r.xf ier = 0 else: print 'method %s not recognized, please check Fitting.py' % (method) return xfit = numpy.arange(min(tx), max(tx), (max(tx)-min(tx))/100.0) yfit = func[0](plsq, xfit, C=fixedPars) yy = func[0](plsq, tx, C=fixedPars) # calculate function self.fitSum2Err = numpy.sum((dy - yy)**2) if usingMPlot and FitPlot != None and plotInstance != None: self.FitPlot(xFit = xfit, yFit = yfit, fitFunc = fund[0], fitPars = plsq, plot = fitPlot, plotInstance = plotInstance) xp.append(plsq) # parameter list xf.append(xfit) # x plot point list yf.append(yfit) # y fit point list # print xp # print len(xp) return(xp, xf, yf, yn) # includes names with yn and range of tx def FitPlot(self, xFit = None, yFit = None, fitFunc = 'exp1', fitPars = None, fixedPars = None, fitPlot=None, plotInstance = None, color=None): """ Plot the fit data onto the fitPlot with the specified "plot Instance". if there is no xFit, or some parameters are missing, we just return. if there is xFit, but no yFit, then we try to compute the fit with what we have. The plot is superimposed on the specified "fitPlot" and the color is specified by the function color in the fitPars list. """ if xFit is None or fitPars is None: return func = self.fitfuncmap[fitFunc] if color is None: fcolor = func[3] else: fcolor = color if yFit is None: yFit = numpy.array([]) for k in range(0, len(fitPars)): yFit[k] = func[0](fitPars[k], xFit[k], C=fixedPars) if plotInstance is None or fitPlot is None: return(yfit) for k in range(0, len(fitPars)): plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor) return(yfit) def getFitErr(self): """ Return the fit error for the most recent fit """ return(self.fitSum2Err) def expfit(self, x, y): """ find best fit of a single exponential function to x and y using the chebyshev polynomial approximation. returns (DC, A, tau) for fit. Perform a single exponential fit to data using Chebyshev polynomial method. Equation fit: y = a1 * exp(-x/tau) + a0 Call: [a0 a1 tau] = expfit(x,y); Calling parameter x is the time base, y is the data to be fit. Returned values: a0 is the offset, a1 is the amplitude, tau is the time constant (scaled in units of x). Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the coefficients for the integral of the data. These are now included in this .py file source. 
This version is based on the one in the pClamp manual: HOWEVER, since I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different, and the resulting equation for tau is different. I manually optimized the tau estimate based on fits to some simulated noisy data. (Its ok to use the whole range of d1 and d0 when the data is clean, but only the first few coeffs really hold the info when the data is noisy.) NOTE: The user is responsible for making sure that the passed data is appropriate, e.g., no large noise or electronic transients, and that the time constants in the data are adequately sampled. To do a double exp fit with this method is possible, but more complex. It would be computationally simpler to try breaking the data into two regions where the fast and slow components are dominant, and fit each separately; then use that to seed a non-linear fit (e.g., L-M) algorithm. Final working version 4/13/99 Paul B. Manis converted to Python 7/9/2009 Paul B. Manis. Seems functional. """ n = 30; # default number of polynomials coeffs to use in fit a = numpy.amin(x) b = numpy.amax(x) d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace... d1 = self.chebint(a, b, d0, n) # coeffs of integral... tau = -numpy.mean(d1[2:3]/d0[2:3]) try: g = numpy.exp(-x/tau) except: g = 0.0 dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function # now estimate the amplitude from the ratios of the coeffs. a1 = self.estimate(d0, dg, 1) a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here return(a0, a1, tau)# def estimate(self, c, d, m): """ compute optimal estimate of parameter from arrays of data """ n = len(c) a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0) return(a) # note : the following routine is a bottleneck. It should be coded in C. def chebftd(self, a, b, n, t, d): """ Chebyshev fit; from Press et al, p 192. matlab code P. Manis 21 Mar 1999 "Given a function func, lower and upper limits of the interval [a,b], and a maximum degree, n, this routine computes the n coefficients c[1..n] such that func(x) sum(k=1, n) of ck*Tk(y) - c0/2, where y = (x -0.5*(b+a))/(0.5*(b-a)) This routine is to be used with moderately large n (30-50) the array of c's is subsequently truncated at the smaller value m such that cm and subsequent terms are negligible." This routine is modified so that we find close points in x (data array) - i.e., we find the best Chebyshev terms to describe the data as if it is an arbitrary function. t is the x data, d is the y data... """ bma = 0.5*(b-a) bpa = 0.5*(b+a) inc = t[1]-t[0] f = numpy.zeros(n) for k in range(0, n): y = numpy.cos(numpy.pi*(k+0.5)/n) pos = int(0.5+(y*bma+bpa)/inc) if pos < 0: pos = 0 if pos >= len(d)-2: pos = len(d)-2 try: f[k]= d[pos+1] except: print "error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d)) print "you should probably make sure this doesn't happen" fac = 2.0/n c=numpy.zeros(n) for j in range(0, n): sum=0.0 for k in range(0, n): sum = sum + f[k]*numpy.cos(numpy.pi*j*(k+0.5)/n) c[j]=fac*sum return(c) def chebint(self, a, b, c, n): """ Given a, b, and c[1..n] as output from chebft or chebftd, and given n, the desired degree of approximation (length of c to be used), this routine computes cint, the Chebyshev coefficients of the integral of the function whose coeffs are in c. The constant of integration is set so that the integral vanishes at a. Coded from Press et al, 3/21/99 P. Manis (Matlab) Python translation 7/8/2009 P. 
Manis """ sum = 0.0 fac = 1.0 con = 0.25*(b-a) # factor that normalizes the interval cint = numpy.zeros(n) for j in range(1,n-2): cint[j]=con*(c[j-1]-c[j+1])/j sum = sum + fac * cint[j] fac = - fac cint[n-1] = con*c[n-2]/(n-1) sum = sum + fac*cint[n-1] cint[0] = 2.0*sum # set constant of integration. return(cint) # routine to flatten an array/list. # def flatten(self, l, ltypes=(list, tuple)): i = 0 while i < len(l): while isinstance(l[i], ltypes): if not l[i]: l.pop(i) if not len(l): break else: l[i:i+1] = list(l[i]) i += 1 return l # flatten() # run tests if we are "main" if __name__ == "__main__": # import matplotlib.pyplot as pyplot import timeit import Fitting import matplotlib as MP MP.use('Qt4Agg') ################## Do not modify the following code # sets up matplotlib with sans-serif plotting... import matplotlib.gridspec as GS # import mpl_toolkits.axes_grid1.inset_locator as INSETS # #import inset_axes, zoomed_inset_axes # import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR # # import AnchoredSizeBar stdFont = 'Arial' import matplotlib.pyplot as pylab pylab.rcParams['text.usetex'] = True pylab.rcParams['interactive'] = False pylab.rcParams['font.family'] = 'sans-serif' pylab.rcParams['font.sans-serif'] = 'Arial' pylab.rcParams['mathtext.default'] = 'sf' pylab.rcParams['figure.facecolor'] = 'white' # next setting allows pdf font to be readable in Adobe Illustrator pylab.rcParams['pdf.fonttype'] = 42 pylab.rcParams['text.dvipnghack'] = True ##################### to here (matplotlib stuff - touchy! Fits = Fitting.Fitting() # x = numpy.arange(0, 100.0, 0.1) # y = 5.0-2.5*numpy.exp(-x/5.0)+0.5*numpy.random.randn(len(x)) # (dc, aFit,tauFit) = Fits.expfit(x,y) # yf = dc + aFit*numpy.exp(-x/tauFit) # pyplot.figure(1) # pyplot.plot(x,y,'k') # pyplot.hold(True) # pyplot.plot(x, yf, 'r') # pyplot.show() exploreError = False if exploreError is True: # explore the error surface for a function: func = 'exppulse' f = Fits.fitfuncmap[func] p1range = numpy.arange(0.1, 5.0, 0.1) p2range = numpy.arange(0.1, 5.0, 0.1) err = numpy.zeros((len(p1range), len(p2range))) x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2])) C = None if func == 'expsum2': C = f[7] # check exchange of tau1 ([1]) and width[4] C = None yOffset, t0, tau1, tau2, amp, width = f[1] # get inital parameters y0 = f[0](f[1], x, C=C) noise = numpy.random.random(y0.shape) - 0.5 y0 += 0.0* noise sh = err.shape yp = numpy.zeros((sh[0], sh[1], len(y0))) for i, p1 in enumerate(p1range): tau1t = tau1*p1 for j, p2 in enumerate(p2range): ampt = amp*p2 pars = (yOffset, t0, tau1t, tau2, ampt, width) # repackage err[i,j] = f[0](pars, x, y0, C=C, sumsq = True) yp[i,j] = f[0](pars, x, C=C, sumsq = False) pylab.figure() CS=pylab.contour(p1range*tau1, p2range*width, err, 25) CB = pylab.colorbar(CS, shrink=0.8, extend='both') pylab.figure() for i, p1 in enumerate(p1range): for j, p2 in enumerate(p2range): pylab.plot(x, yp[i,j]) pylab.plot(x, y0, 'r-', linewidth=2.0) # run tests for each type of fit, return results to compare parameters cons = None bnds = None signal_to_noise = 100000. 
for func in Fits.fitfuncmap: if func != 'exppulse': continue print "\nFunction: %s\nTarget: " % (func), f = Fits.fitfuncmap[func] for k in range(0,len(f[1])): print "%f " % (f[1][k]), print "\nStarting: ", for k in range(0,len(f[5])): print "%f " % (f[5][k]), # nstep = 500.0 # if func == 'sin': # nstep = 100.0 x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2])) C = None if func == 'expsum2': C = f[7] if func == 'exppulse': C = f[7] y = f[0](f[1], x, C=C) yd = numpy.array(y) noise = numpy.random.normal(0, 0.1, yd.shape) my = numpy.amax(yd) #yd = yd + sigmax*0.05*my*(numpy.random.random_sample(shape(yd))-0.5) yd += noise*my/signal_to_noise testMethod = 'SLSQP' if func == 'taucurve': continue bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0), (0., 1000), (0.0, 500.0), (0.1, 50.0)] (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod) elif func == 'boltz': continue bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)] (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod) elif func == 'exp2': bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0), (1., 10000.)] (fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod) elif func == 'exppulse': # set some constraints to the fitting # yOffset, tau1, tau2, amp, width = f[1] # order of constraings dt = numpy.mean(numpy.diff(x)) bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)] # cxample for constraints: # cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]}, # {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]}, # {'type': 'ineq', 'fun': lambda x: x[2]}, # {'type': 'ineq', 'fun': lambda x: - x[4] + 2000}, # ) cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] }, # tau1 < tau2 ) C = None tv = f[5] initialgr = f[0](f[5], x, None ) (fpar, xf, yf, names) = Fits.FitRegion( numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod) # print xf # print yf # print fpar # print names else: (fpar, xf, yf, names) = Fits.FitRegion( numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod) #print fpar s = numpy.shape(fpar) j = 0 outstr = "" initstr = "" truestr = "" for i in range(0, len(names[j])): # print "%f " % fpar[j][i], outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i])) initstr = initstr + '%s = %f, ' % (names[j][i], tv[i]) truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i]) print( "\nTrue(%d) : %s" % (j, truestr) ) print( "FIT(%d) : %s" % (j, outstr) ) print( "init(%d) : %s" % (j, initstr) ) print( "Error: : %f" % (Fits.fitSum2Err)) if func is 'exppulse': pylab.figure() pylab.plot(numpy.array(x), yd, 'ro-') pylab.hold(True) pylab.plot(numpy.array(x), initialgr, 'k--') pylab.plot(xf[0], yf[0], 'b-') # fit pylab.show()
mit
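Much of the wrapper above ultimately delegates to SciPy optimisers; a minimal, self-contained single-exponential fit with scipy.optimize.curve_fit, using the same 'exp1' parameterisation (the synthetic trace and starting guess are assumptions for illustration):

import numpy as np
from scipy.optimize import curve_fit

def exp_offset(x, dc, a0, tau):
    # same parameterisation as the 'exp1' entry above: DC + A0 * exp(-x/tau)
    return dc + a0 * np.exp(-x / tau)

x = np.arange(0, 100.0, 1.0)
rng = np.random.RandomState(0)
y = 0.5 + 1.0 * np.exp(-x / 5.0) + 0.05 * rng.randn(x.size)  # noisy synthetic trace

p0 = [0.0, 0.5, 10.0]  # rough starting guess for (DC, A0, tau)
popt, pcov = curve_fit(exp_offset, x, y, p0=p0)
print("DC=%.3f A0=%.3f tau=%.3f" % tuple(popt))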
RayMick/scikit-learn
examples/semi_supervised/plot_label_propagation_digits.py
268
2723
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import label_propagation from sklearn.metrics import confusion_matrix, classification_report digits = datasets.load_digits() rng = np.random.RandomState(0) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:330]] y = digits.target[indices[:330]] images = digits.images[indices[:330]] n_total_samples = len(y) n_labeled_points = 30 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 ############################################################################### # Learn with LabelSpreading lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # calculate uncertainty values for each transduced distribution pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] ############################################################################### # plot f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title('predict: %i\ntrue: %i' % ( lp_model.transduction_[image_index], y[image_index])) f.suptitle('Learning with small amount of labeled data') plt.show()
bsd-3-clause
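The uncertainty ranking in the example above reduces to a per-sample entropy over the label distributions; a small standalone sketch of that step, with a made-up probability matrix standing in for lp_model.label_distributions_:

import numpy as np
from scipy.stats import entropy

# Hypothetical per-sample class probabilities (each row sums to 1).
probs = np.array([[0.98, 0.01, 0.01],
                  [0.40, 0.35, 0.25],
                  [0.70, 0.20, 0.10]])

# entropy() works column-wise by default, hence the transpose, matching the example's call.
pred_entropies = entropy(probs.T)
most_uncertain_first = np.argsort(pred_entropies)[::-1]
print(pred_entropies)
print(most_uncertain_first)  # the flattest row (index 1) comes first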
molly24Huang/Cents_trip
Recommendation/attr_food_distance.py
1
2978
import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db


def cal_dist(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    distance = 6378.137 * c
    return distance

food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'
food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)

food_data = food_df.iloc[:, [0, 6, 7]]
tourism_attractions_data = tourism_attractions_df.iloc[:, [0, 2, 3]]

foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()
attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()

distances = []

# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
# PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
# PWD=X_c03EeYTe#u;", "", "")

for i in range(len(tourism_attractions_data)):
    for k in range(len(food_data)):
        distance = cal_dist(lng_attractions[i], lat_attractions[i], lng_food[k], lat_food[k])
        # print(distance)
        distances.append(distance)

output = open('rating.txt', 'w')
k = 1
for i in range(len(tourism_attractions_data)):
    for j in range(len(food_data)):
        this_attractid = str(attractionid[i])
        this_foodid = str(foodid[j])
        # distances is filled row by row (one row per attraction), so the flat
        # index of the (i, j) pair is i * len(food_data) + j
        this_distance = str(distances[i * len(food_data) + j])
        output.write(this_attractid)
        output.write('\t')
        output.write(this_foodid)
        output.write('\t')
        output.write(this_distance)
        output.write('\n')
output.close()

#print(len(distances))

# k = 1
# for i in range(len(tourism_attractions_data)):
#     for j in range(len(food_data)):
#         this_attractid = attractionid[i]
#         this_foodid = foodid[j]
#         this_distance = distances[i * len(food_data) + j]
#         sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
#             attractionID=this_attractid, foodID=this_foodid, distance=this_distance
#         )
#         print(sql, '>>')
#         try:
#             stmt = ibm_db.exec_immediate(conn, sql)
#         except Exception as e:
#             print(e)
#             print("Inserting couldn't be completed.")
#             ibm_db.rollback(conn)
#         else:
#             ibm_db.commit(conn)
#             print("Inserting complete.")
#         print('-----' + str(k) + '-----')
#         k += 1
# #
apache-2.0
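The nested loop above evaluates one haversine distance per attraction/food pair; a broadcast NumPy sketch of the same great-circle formula (the sample coordinates are made up) computes the whole pairwise matrix at once:

import numpy as np

def haversine_matrix(lon1, lat1, lon2, lat2, radius_km=6378.137):
    # pairwise distances between two coordinate sets, shape (len(set1), len(set2))
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    dlon = lon2[np.newaxis, :] - lon1[:, np.newaxis]
    dlat = lat2[np.newaxis, :] - lat1[:, np.newaxis]
    a = (np.sin(dlat / 2) ** 2
         + np.cos(lat1)[:, np.newaxis] * np.cos(lat2)[np.newaxis, :] * np.sin(dlon / 2) ** 2)
    return 2 * radius_km * np.arcsin(np.sqrt(a))

# Illustrative coordinates (degrees), not taken from the CSV files above.
attr_lon = np.array([103.85, 103.82])
attr_lat = np.array([1.29, 1.28])
food_lon = np.array([103.84, 103.86, 103.80])
food_lat = np.array([1.30, 1.27, 1.31])
print(haversine_matrix(attr_lon, attr_lat, food_lon, food_lat))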
costypetrisor/scikit-learn
examples/tree/plot_tree_regression_multioutput.py
43
1791
""" =================================================================== Multi-output Decision Tree Regression =================================================================== An example to illustrate multi-output regression with decision tree. The :ref:`decision trees <tree>` is used to predict simultaneously the noisy x and y observations of a circle given a single underlying feature. As a result, it learns local linear regressions approximating the circle. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += (0.5 - rng.rand(20, 2)) # Fit regression model clf_1 = DecisionTreeRegressor(max_depth=2) clf_2 = DecisionTreeRegressor(max_depth=5) clf_3 = DecisionTreeRegressor(max_depth=8) clf_1.fit(X, y) clf_2.fit(X, y) clf_3.fit(X, y) # Predict X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis] y_1 = clf_1.predict(X_test) y_2 = clf_2.predict(X_test) y_3 = clf_3.predict(X_test) # Plot the results plt.figure() plt.scatter(y[:, 0], y[:, 1], c="k", label="data") plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2") plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5") plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8") plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("data") plt.ylabel("target") plt.title("Multi-output Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
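To make the overfitting note above concrete, a small sketch (assumed even/odd train/test split and synthetic data) that scores the same kind of multi-output tree at several depths:

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(200, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += 0.2 * rng.randn(*y.shape)

# simple alternating split, just for illustration
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]

for depth in (2, 5, 8, 12):
    tree = DecisionTreeRegressor(max_depth=depth).fit(X_train, y_train)
    train_err = mean_squared_error(y_train, tree.predict(X_train))
    test_err = mean_squared_error(y_test, tree.predict(X_test))
    print("max_depth=%2d  train MSE=%.3f  test MSE=%.3f" % (depth, train_err, test_err))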
wenhuchen/ETHZ-Bootstrapped-Captioning
visual-concepts/coco/PythonAPI/pycocotools/coco.py
1
16953
__author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # segToMask - Convert polygon segmentation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>segToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import urllib import copy import itertools import mask import os from collections import defaultdict class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print 'loading annotations into memory...' tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset)) print 'Done (t=%0.2fs)'%(time.time()- tic) self.dataset = dataset self.createIndex() def createIndex(self): # create index print 'creating index...' 
anns,cats,imgs = dict(),dict(),dict() imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print 'index created!' # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print '%s: %s'%(key, value) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. :param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. 
:param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception("datasetType not supported") if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((len(seg)/2, 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = mask.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print ann['caption'] def loadRes(self, resFile): """ Load result file and return a result api object. 
:param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print 'Loading and preparing results... ' tic = time.time() if type(resFile) == str or type(resFile) == unicode: anns = json.load(open(resFile)) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = mask.area([ann['segmentation']])[0] if not 'bbox' in ann: ann['bbox'] = mask.toBbox([ann['segmentation']])[0] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print 'DONE (t=%0.2fs)'%(time.time()- tic) res.dataset['annotations'] = anns res.createIndex() return res def download( self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print 'Please specify target directory' return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urllib.urlretrieve(img['coco_url'], fname) print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print("Converting ndarray to lists...") assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print("%d/%d" % (i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann
bsd-3-clause
emdodds/LCAversions
timing.py
1
3230
#This file will time various versions of LCA
from __future__ import division
import numpy as np
import sklearn.preprocessing as skp
from timeit import default_timer as timer
from LCAnumpy import lca as lcan
from LCAfortran import lca as lcaf
from LCAnumbaprog import lca as lcag

def main():
    """Profiles various versions of LCA."""
    nshort = 6
    tshort = 2
    nmed = 3
    tmed = 6
    nlong = 1
    #Setup variables for inference
    numDict = int(2048)
    numBatch = int(128)
    dataSize = int(256)
    dictsIn = np.random.randn(numDict,dataSize)
    # LCA requires that dictionary be unit norm
    dictsIn = skp.normalize(dictsIn, axis=1)
    stimuli = np.random.randn(numBatch,dataSize)
    batchCoeffs = np.random.randn(numBatch,numDict)
    coeffs = np.zeros((numBatch, numDict))
    eta = .01
    lamb = .05
    nIter = 300
    adapt = .99
    softThresh = 0
    thresh = np.random.randn(numBatch)
    #LCA
    params = """Parameters:
    numDict: """+str(numDict)+"""
    numBatch: """+str(numBatch)+"""
    dataSize: """+str(dataSize)+"""
    nIter: """+str(nIter)+"""\n"""
    print params
    start = timer()
    lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
    dt = timer()-start
    if dt < tshort:
        n_times = nshort
    elif dt < tmed:
        n_times = nmed
    else:
        n_times = nlong
    for ii in xrange(n_times-1):
        start = timer()
        lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
        dt = dt+timer()-start
    dt = dt/(n_times)
    print '---------------Numpy based LCA----------------'
    print 'Average time over '+str(n_times)+' trials:'
    print '%f s' % dt
    dictsIn = np.array(dictsIn,order='F')
    stimuli = np.array(stimuli,order='F')
    coeffs = np.array(coeffs,order='F')
    batchCoeffs = np.array(batchCoeffs,order='F')
    thresh = np.array(thresh,order='F')
    start = timer()
    lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
    dt = timer()-start
    if dt < tshort:
        n_times = nshort
    elif dt < tmed:
        n_times = nmed
    else:
        n_times = nlong
    for ii in xrange(n_times-1):
        start = timer()
        lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
        dt = dt+timer()-start
    dt = dt/(n_times)
    print '---------------Fortran based LCA--------------'
    print 'Average time over '+str(n_times)+' trials:'
    print '%f s' % dt
    dictsIn = np.array(dictsIn,dtype=np.float32,order='F')
    stimuli = np.array(stimuli,dtype=np.float32,order='F')
    start = timer()
    lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
    dt = timer()-start
    if dt < tshort:
        n_times = nshort
    elif dt < tmed:
        n_times = nmed
    else:
        n_times = nlong
    for ii in xrange(n_times-1):
        start = timer()
        lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
        dt = dt+timer()-start
    dt = dt/(n_times)
    print '----------------GPU based LCA-----------------'
    print 'Average time over '+str(n_times)+' trials:'
    print '%f s' % dt

if __name__ == '__main__':
    main()
mit
nmayorov/scikit-learn
sklearn/linear_model/logistic.py
9
67760
""" Logistic Regression """ # Author: Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <f@bianp.net> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Manoj Kumar <manojkumarsivaraj334@gmail.com> # Lars Buitinck # Simon Wu <s8wu@uwaterloo.ca> import numbers import warnings import numpy as np from scipy import optimize, sparse from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator from .sag import sag_solver from ..feature_selection.from_model import _LearntSelectorMixin from ..preprocessing import LabelEncoder, LabelBinarizer from ..svm.base import _fit_liblinear from ..utils import check_array, check_consistent_length, compute_class_weight from ..utils import check_random_state from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot, softmax, squared_norm) from ..utils.extmath import row_norms from ..utils.optimize import newton_cg from ..utils.validation import check_X_y from ..exceptions import DataConversionWarning from ..exceptions import NotFittedError from ..utils.fixes import expit from ..utils.multiclass import check_classification_targets from ..externals.joblib import Parallel, delayed from ..model_selection import check_cv from ..externals import six from ..metrics import SCORERS # .. some helper functions for logistic_regression_path .. def _intercept_dot(w, X, y): """Computes y * np.dot(X, w). It takes into consideration if the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. """ c = 0. if w.size == X.shape[1] + 1: c = w[-1] w = w[:-1] z = safe_sparse_dot(X, w) + c return w, c, y * z def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None): """Computes the logistic loss and gradient. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- out : float Logistic loss. grad : ndarray, shape (n_features,) or (n_features + 1,) Logistic gradient. """ _, n_features = X.shape grad = np.empty_like(w) w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w) z = expit(yz) z0 = sample_weight * (z - 1) * y grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w # Case where we fit the intercept. if grad.shape[0] > n_features: grad[-1] = z0.sum() return out, grad def _logistic_loss(w, X, y, alpha, sample_weight=None): """Computes the logistic loss. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. 
Returns ------- out : float Logistic loss. """ w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w) return out def _logistic_grad_hess(w, X, y, alpha, sample_weight=None): """Computes the gradient and the Hessian, in the case of a logistic loss. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- grad : ndarray, shape (n_features,) or (n_features + 1,) Logistic gradient. Hs : callable Function that takes the gradient as a parameter and returns the matrix product of the Hessian and gradient. """ n_samples, n_features = X.shape grad = np.empty_like(w) fit_intercept = grad.shape[0] > n_features w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) z = expit(yz) z0 = sample_weight * (z - 1) * y grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w # Case where we fit the intercept. if fit_intercept: grad[-1] = z0.sum() # The mat-vec product of the Hessian d = sample_weight * z * (1 - z) if sparse.issparse(X): dX = safe_sparse_dot(sparse.dia_matrix((d, 0), shape=(n_samples, n_samples)), X) else: # Precompute as much as possible dX = d[:, np.newaxis] * X if fit_intercept: # Calculate the double derivative with respect to intercept # In the case of sparse matrices this returns a matrix object. dd_intercept = np.squeeze(np.array(dX.sum(axis=0))) def Hs(s): ret = np.empty_like(s) ret[:n_features] = X.T.dot(dX.dot(s[:n_features])) ret[:n_features] += alpha * s[:n_features] # For the fit intercept case. if fit_intercept: ret[:n_features] += s[-1] * dd_intercept ret[-1] = dd_intercept.dot(s[:n_features]) ret[-1] += d.sum() * s[-1] return ret return grad, Hs def _multinomial_loss(w, X, Y, alpha, sample_weight): """Computes multinomial loss and class probabilities. Parameters ---------- w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Y : ndarray, shape (n_samples, n_classes) Transformed labels according to the output of LabelBinarizer. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- loss : float Multinomial loss. p : ndarray, shape (n_samples, n_classes) Estimated class probabilities. w : ndarray, shape (n_classes, n_features) Reshaped param vector excluding intercept terms. Reference --------- Bishop, C. M. (2006). Pattern recognition and machine learning. Springer. 
(Chapter 4.3.4) """ n_classes = Y.shape[1] n_features = X.shape[1] fit_intercept = w.size == (n_classes * (n_features + 1)) w = w.reshape(n_classes, -1) sample_weight = sample_weight[:, np.newaxis] if fit_intercept: intercept = w[:, -1] w = w[:, :-1] else: intercept = 0 p = safe_sparse_dot(X, w.T) p += intercept p -= logsumexp(p, axis=1)[:, np.newaxis] loss = -(sample_weight * Y * p).sum() loss += 0.5 * alpha * squared_norm(w) p = np.exp(p, p) return loss, p, w def _multinomial_loss_grad(w, X, Y, alpha, sample_weight): """Computes the multinomial loss, gradient and class probabilities. Parameters ---------- w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Y : ndarray, shape (n_samples, n_classes) Transformed labels according to the output of LabelBinarizer. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. Returns ------- loss : float Multinomial loss. grad : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),) Ravelled gradient of the multinomial loss. p : ndarray, shape (n_samples, n_classes) Estimated class probabilities Reference --------- Bishop, C. M. (2006). Pattern recognition and machine learning. Springer. (Chapter 4.3.4) """ n_classes = Y.shape[1] n_features = X.shape[1] fit_intercept = (w.size == n_classes * (n_features + 1)) grad = np.zeros((n_classes, n_features + bool(fit_intercept))) loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight) sample_weight = sample_weight[:, np.newaxis] diff = sample_weight * (p - Y) grad[:, :n_features] = safe_sparse_dot(diff.T, X) grad[:, :n_features] += alpha * w if fit_intercept: grad[:, -1] = diff.sum(axis=0) return loss, grad.ravel(), p def _multinomial_grad_hess(w, X, Y, alpha, sample_weight): """ Computes the gradient and the Hessian, in the case of a multinomial loss. Parameters ---------- w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Y : ndarray, shape (n_samples, n_classes) Transformed labels according to the output of LabelBinarizer. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. Returns ------- grad : array, shape (n_classes * n_features,) or (n_classes * (n_features + 1),) Ravelled gradient of the multinomial loss. hessp : callable Function that takes in a vector input of shape (n_classes * n_features) or (n_classes * (n_features + 1)) and returns matrix-vector product with hessian. References ---------- Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian. http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf """ n_features = X.shape[1] n_classes = Y.shape[1] fit_intercept = w.size == (n_classes * (n_features + 1)) # `loss` is unused. Refactoring to avoid computing it does not # significantly speed up the computation and decreases readability loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight) sample_weight = sample_weight[:, np.newaxis] # Hessian-vector product derived by applying the R-operator on the gradient # of the multinomial loss function. 
def hessp(v): v = v.reshape(n_classes, -1) if fit_intercept: inter_terms = v[:, -1] v = v[:, :-1] else: inter_terms = 0 # r_yhat holds the result of applying the R-operator on the multinomial # estimator. r_yhat = safe_sparse_dot(X, v.T) r_yhat += inter_terms r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis] r_yhat *= p r_yhat *= sample_weight hessProd = np.zeros((n_classes, n_features + bool(fit_intercept))) hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X) hessProd[:, :n_features] += v * alpha if fit_intercept: hessProd[:, -1] = r_yhat.sum(axis=0) return hessProd.ravel() return grad, hessp def _check_solver_option(solver, multi_class, penalty, dual): if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']: raise ValueError("Logistic Regression supports only liblinear," " newton-cg, lbfgs and sag solvers, got %s" % solver) if multi_class not in ['multinomial', 'ovr']: raise ValueError("multi_class should be either multinomial or " "ovr, got %s" % multi_class) if multi_class == 'multinomial' and solver == 'liblinear': raise ValueError("Solver %s does not support " "a multinomial backend." % solver) if solver != 'liblinear': if penalty != 'l2': raise ValueError("Solver %s supports only l2 penalties, " "got %s penalty." % (solver, penalty)) if dual: raise ValueError("Solver %s supports only " "dual=False, got dual=%s" % (solver, dual)) def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100, tol=1e-4, verbose=0, solver='lbfgs', coef=None, copy=False, class_weight=None, dual=False, penalty='l2', intercept_scaling=1., multi_class='ovr', random_state=None, check_input=True, max_squared_sum=None, sample_weight=None): """Compute a Logistic Regression model for a list of regularization parameters. This is an implementation that uses the result of the previous model to speed up computations along the set of solutions, making it faster than sequentially calling LogisticRegression for the different parameters. Read more in the :ref:`User Guide <logistic_regression>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) Input data, target values. Cs : int | array-like, shape (n_cs,) List of values for the regularization parameter or integer specifying the number of regularization parameters that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. fit_intercept : bool Whether to fit an intercept for the model. In this case the shape of the returned array is (n_cs, n_features + 1). max_iter : int Maximum number of iterations for the solver. tol : float Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. verbose : int For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'} Numerical solver to use. coef : array-like, shape (n_features,), default None Initialization value for coefficients of logistic regression. Useless for liblinear solver. copy : bool, default False Whether or not to produce a copy of the data. A copy is not required anymore. This parameter is deprecated and will be removed in 0.19. 
class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The 'newton-cg', 'sag' and 'lbfgs' solvers support only l2 penalties. intercept_scaling : float, default 1. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : str, {'ovr', 'multinomial'} Multiclass option can be either 'ovr' or 'multinomial'. If the option chosen is 'ovr', then a binary problem is fit for each label. Else the loss minimised is the multinomial loss fit across the entire probability distribution. Works only for the 'lbfgs' and 'newton-cg' solvers. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used only in solvers 'sag' and 'liblinear'. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. sample_weight : array-like, shape(n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. Cs : ndarray Grid of Cs used for cross-validation. n_iter : array, shape (n_cs,) Actual number of iteration for each Cs. Notes ----- You might get slightly different results with the solver liblinear than with the others since this uses LIBLINEAR which penalizes the intercept. """ if copy: warnings.warn("A copy is not required anymore. The 'copy' parameter " "is deprecated and will be removed in 0.19.", DeprecationWarning) if isinstance(Cs, numbers.Integral): Cs = np.logspace(-4, 4, Cs) _check_solver_option(solver, multi_class, penalty, dual) # Preprocessing. 
if check_input or copy: X = check_array(X, accept_sparse='csr', dtype=np.float64) y = check_array(y, ensure_2d=False, copy=copy, dtype=None) check_consistent_length(X, y) _, n_features = X.shape classes = np.unique(y) random_state = check_random_state(random_state) if pos_class is None and multi_class != 'multinomial': if (classes.size > 2): raise ValueError('To fit OvR, use the pos_class argument') # np.unique(y) gives labels in sorted order. pos_class = classes[1] # If sample weights exist, convert them to array (support for lists) # and check length # Otherwise set them to 1 for all examples if sample_weight is not None: sample_weight = np.array(sample_weight, dtype=np.float64, order='C') check_consistent_length(y, sample_weight) else: sample_weight = np.ones(X.shape[0]) # If class_weights is a dict (provided by the user), the weights # are assigned to the original labels. If it is "balanced", then # the class_weights are assigned after masking the labels with a OvR. le = LabelEncoder() if isinstance(class_weight, dict) or multi_class == 'multinomial': if solver == "liblinear": if classes.size == 2: # Reconstruct the weights with keys 1 and -1 temp = {1: class_weight[pos_class], -1: class_weight[classes[0]]} class_weight = temp.copy() else: raise ValueError("In LogisticRegressionCV the liblinear " "solver cannot handle multiclass with " "class_weight of type dict. Use the lbfgs, " "newton-cg or sag solvers or set " "class_weight='balanced'") else: class_weight_ = compute_class_weight(class_weight, classes, y) sample_weight *= class_weight_[le.fit_transform(y)] # For doing a ovr, we need to mask the labels first. for the # multinomial case this is not necessary. if multi_class == 'ovr': w0 = np.zeros(n_features + int(fit_intercept)) mask_classes = np.array([-1, 1]) mask = (y == pos_class) y_bin = np.ones(y.shape, dtype=np.float64) y_bin[~mask] = -1. # for compute_class_weight # 'auto' is deprecated and will be removed in 0.19 if class_weight in ("auto", "balanced"): class_weight_ = compute_class_weight(class_weight, mask_classes, y_bin) sample_weight *= class_weight_[le.fit_transform(y_bin)] else: if solver != 'sag': lbin = LabelBinarizer() Y_multi = lbin.fit_transform(y) if Y_multi.shape[1] == 1: Y_multi = np.hstack([1 - Y_multi, Y_multi]) else: # SAG multinomial solver needs LabelEncoder, not LabelBinarizer le = LabelEncoder() Y_multi = le.fit_transform(y) w0 = np.zeros((classes.size, n_features + int(fit_intercept)), order='F') if coef is not None: # it must work both giving the bias term and not if multi_class == 'ovr': if coef.size not in (n_features, w0.size): raise ValueError( 'Initialization coef is of shape %d, expected shape ' '%d or %d' % (coef.size, n_features, w0.size)) w0[:coef.size] = coef else: # For binary problems coef.shape[0] should be 1, otherwise it # should be classes.size. n_classes = classes.size if n_classes == 2: n_classes = 1 if (coef.shape[0] != n_classes or coef.shape[1] not in (n_features, n_features + 1)): raise ValueError( 'Initialization coef is of shape (%d, %d), expected ' 'shape (%d, %d) or (%d, %d)' % ( coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1)) w0[:, :coef.shape[1]] = coef if multi_class == 'multinomial': # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters. 
if solver in ['lbfgs', 'newton-cg']: w0 = w0.ravel() target = Y_multi if solver == 'lbfgs': func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2] elif solver == 'newton-cg': func = lambda x, *args: _multinomial_loss(x, *args)[0] grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1] hess = _multinomial_grad_hess warm_start_sag = {'coef': w0.T} else: target = y_bin if solver == 'lbfgs': func = _logistic_loss_and_grad elif solver == 'newton-cg': func = _logistic_loss grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1] hess = _logistic_grad_hess warm_start_sag = {'coef': np.expand_dims(w0, axis=1)} coefs = list() n_iter = np.zeros(len(Cs), dtype=np.int32) for i, C in enumerate(Cs): if solver == 'lbfgs': try: w0, loss, info = optimize.fmin_l_bfgs_b( func, w0, fprime=None, args=(X, target, 1. / C, sample_weight), iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter) except TypeError: # old scipy doesn't have maxiter w0, loss, info = optimize.fmin_l_bfgs_b( func, w0, fprime=None, args=(X, target, 1. / C, sample_weight), iprint=(verbose > 0) - 1, pgtol=tol) if info["warnflag"] == 1 and verbose > 0: warnings.warn("lbfgs failed to converge. Increase the number " "of iterations.") try: n_iter_i = info['nit'] - 1 except: n_iter_i = info['funcalls'] - 1 elif solver == 'newton-cg': args = (X, target, 1. / C, sample_weight) w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol) elif solver == 'liblinear': coef_, intercept_, n_iter_i, = _fit_liblinear( X, target, C, fit_intercept, intercept_scaling, class_weight, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight) if fit_intercept: w0 = np.concatenate([coef_.ravel(), intercept_]) else: w0 = coef_.ravel() elif solver == 'sag': if multi_class == 'multinomial': target = target.astype(np.float64) loss = 'multinomial' else: loss = 'log' w0, n_iter_i, warm_start_sag = sag_solver( X, target, sample_weight, loss, 1. / C, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag) else: raise ValueError("solver must be one of {'liblinear', 'lbfgs', " "'newton-cg', 'sag'}, got '%s' instead" % solver) if multi_class == 'multinomial': multi_w0 = np.reshape(w0, (classes.size, -1)) if classes.size == 2: multi_w0 = multi_w0[1][np.newaxis, :] coefs.append(multi_w0) else: coefs.append(w0.copy()) n_iter[i] = n_iter_i return coefs, np.array(Cs), n_iter # helper function for LogisticCV def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10, scoring=None, fit_intercept=False, max_iter=100, tol=1e-4, class_weight=None, verbose=0, solver='lbfgs', penalty='l2', dual=False, intercept_scaling=1., multi_class='ovr', random_state=None, max_squared_sum=None, sample_weight=None): """Computes scores across logistic_regression_path Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target labels. train : list of indices The indices of the train set. test : list of indices The indices of the test set. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. Cs : list of floats | int Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. If not provided, then a fixed set of values for Cs are used. 
scoring : callable For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is accuracy_score. fit_intercept : bool If False, then the bias term is set to zero. Else the last term of each coef_ gives us the intercept. max_iter : int Maximum number of iterations for the solver. tol : float Tolerance for stopping criteria. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. verbose : int For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'} Decides which solver to use. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The newton-cg and lbfgs solvers support only l2 penalties. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. intercept_scaling : float, default 1. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : str, {'ovr', 'multinomial'} Multiclass option can be either 'ovr' or 'multinomial'. If the option chosen is 'ovr', then a binary problem is fit for each label. Else the loss minimised is the multinomial loss fit across the entire probability distribution. Works only for the 'lbfgs' and 'newton-cg' solver. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used only in solvers 'sag' and 'liblinear'. max_squared_sum : float, default None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. sample_weight : array-like, shape(n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. Cs : ndarray Grid of Cs used for cross-validation. scores : ndarray, shape (n_cs,) Scores obtained for each Cs. n_iter : array, shape(n_cs,) Actual number of iteration for each Cs. 
""" _check_solver_option(solver, multi_class, penalty, dual) X_train = X[train] X_test = X[test] y_train = y[train] y_test = y[test] if sample_weight is not None: sample_weight = sample_weight[train] coefs, Cs, n_iter = logistic_regression_path( X_train, y_train, Cs=Cs, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, class_weight=class_weight, pos_class=pos_class, multi_class=multi_class, tol=tol, verbose=verbose, dual=dual, penalty=penalty, intercept_scaling=intercept_scaling, random_state=random_state, check_input=False, max_squared_sum=max_squared_sum, sample_weight=sample_weight) log_reg = LogisticRegression(fit_intercept=fit_intercept) # The score method of Logistic Regression has a classes_ attribute. if multi_class == 'ovr': log_reg.classes_ = np.array([-1, 1]) elif multi_class == 'multinomial': log_reg.classes_ = np.unique(y_train) else: raise ValueError("multi_class should be either multinomial or ovr, " "got %d" % multi_class) if pos_class is not None: mask = (y_test == pos_class) y_test = np.ones(y_test.shape, dtype=np.float64) y_test[~mask] = -1. # To deal with object dtypes, we need to convert into an array of floats. y_test = check_array(y_test, dtype=np.float64, ensure_2d=False) scores = list() if isinstance(scoring, six.string_types): scoring = SCORERS[scoring] for w in coefs: if multi_class == 'ovr': w = w[np.newaxis, :] if fit_intercept: log_reg.coef_ = w[:, :-1] log_reg.intercept_ = w[:, -1] else: log_reg.coef_ = w log_reg.intercept_ = 0. if scoring is None: scores.append(log_reg.score(X_test, y_test)) else: scores.append(scoring(log_reg, X_test, y_test)) return coefs, Cs, np.array(scores), n_iter class LogisticRegression(BaseEstimator, LinearClassifierMixin, _LearntSelectorMixin, SparseCoefMixin): """Logistic Regression (aka logit, MaxEnt) classifier. In the multiclass case, the training algorithm uses the one-vs-rest (OvR) scheme if the 'multi_class' option is set to 'ovr' and uses the cross- entropy loss, if the 'multi_class' option is set to 'multinomial'. (Currently the 'multinomial' option is supported only by the 'lbfgs', 'sag' and 'newton-cg' solvers.) This class implements regularized logistic regression using the 'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle both dense and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit floats for optimal performance; any other input format will be converted (and copied). The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization with primal formulation. The 'liblinear' solver supports both L1 and L2 regularization, with a dual formulation only for the L2 penalty. Read more in the :ref:`User Guide <logistic_regression>`. Parameters ---------- penalty : str, 'l1' or 'l2', default: 'l2' Used to specify the norm used in the penalization. The newton-cg, sag and lbfgs solvers support only l2 penalties. dual : bool, default: False Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. C : float, default: 1.0 Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. fit_intercept : bool, default: True Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function. intercept_scaling : float, default: 1 Useful only if solver is liblinear. when self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. 
a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. class_weight : dict or 'balanced', default: None Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. .. versionadded:: 0.17 *class_weight='balanced'* instead of deprecated *class_weight='auto'*. max_iter : int, default: 100 Useful only for the newton-cg, sag and lbfgs solvers. Maximum number of iterations taken for the solvers to converge. random_state : int seed, RandomState instance, default: None The seed of the pseudo random number generator to use when shuffling the data. Used only in solvers 'sag' and 'liblinear'. solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear' Algorithm to use in the optimization problem. - For small datasets, 'liblinear' is a good choice, whereas 'sag' is faster for large ones. - For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle multinomial loss; 'liblinear' is limited to one-versus-rest schemes. - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. tol : float, default: 1e-4 Tolerance for stopping criteria. multi_class : str, {'ovr', 'multinomial'}, default: 'ovr' Multiclass option can be either 'ovr' or 'multinomial'. If the option chosen is 'ovr', then a binary problem is fit for each label. Else the loss minimised is the multinomial loss fit across the entire probability distribution. Works only for the 'newton-cg', 'sag' and 'lbfgs' solver. .. versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. verbose : int, default: 0 For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. warm_start : bool, default: False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Useless for liblinear solver. .. versionadded:: 0.17 *warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers. n_jobs : int, default: 1 Number of CPU cores used during the cross-validation loop. If given a value of -1, all cores are used. Attributes ---------- coef_ : array, shape (n_classes, n_features) Coefficient of the features in the decision function. intercept_ : array, shape (n_classes,) Intercept (a.k.a. bias) added to the decision function. If `fit_intercept` is set to False, the intercept is set to zero. n_iter_ : array, shape (n_classes,) or (1, ) Actual number of iterations for all classes. If binary or multinomial, it returns only 1 element. For liblinear solver, only the maximum number of iteration across all classes is given. 
See also -------- SGDClassifier : incrementally trained logistic regression (when given the parameter ``loss="log"``). sklearn.svm.LinearSVC : learns SVM models using the same algorithm. Notes ----- The underlying C implementation uses a random number generator to select features when fitting the model. It is thus not uncommon, to have slightly different results for the same input data. If that happens, try with a smaller tol parameter. Predict output may not match that of standalone liblinear in certain cases. See :ref:`differences from liblinear <liblinear_differences>` in the narrative documentation. References ---------- LIBLINEAR -- A Library for Large Linear Classification http://www.csie.ntu.edu.tw/~cjlin/liblinear/ Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent methods for logistic regression and maximum entropy models. Machine Learning 85(1-2):41-75. http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf """ def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='liblinear', max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1): self.penalty = penalty self.dual = dual self.tol = tol self.C = C self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.class_weight = class_weight self.random_state = random_state self.solver = solver self.max_iter = max_iter self.multi_class = multi_class self.verbose = verbose self.warm_start = warm_start self.n_jobs = n_jobs def fit(self, X, y, sample_weight=None): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. .. versionadded:: 0.17 *sample_weight* support to LogisticRegression. Returns ------- self : object Returns self. 
""" if not isinstance(self.C, numbers.Number) or self.C < 0: raise ValueError("Penalty term must be positive; got (C=%r)" % self.C) if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0: raise ValueError("Maximum number of iteration must be positive;" " got (max_iter=%r)" % self.max_iter) if not isinstance(self.tol, numbers.Number) or self.tol < 0: raise ValueError("Tolerance for stopping criteria must be " "positive; got (tol=%r)" % self.tol) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") check_classification_targets(y) self.classes_ = np.unique(y) n_samples, n_features = X.shape _check_solver_option(self.solver, self.multi_class, self.penalty, self.dual) if self.solver == 'liblinear': self.coef_, self.intercept_, n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, sample_weight=sample_weight) self.n_iter_ = np.array([n_iter_]) return self if self.solver == 'sag': max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None n_classes = len(self.classes_) classes_ = self.classes_ if n_classes < 2: raise ValueError("This solver needs samples of at least 2 classes" " in the data, but the data contains only one" " class: %r" % classes_[0]) if len(self.classes_) == 2: n_classes = 1 classes_ = classes_[1:] if self.warm_start: warm_start_coef = getattr(self, 'coef_', None) else: warm_start_coef = None if warm_start_coef is not None and self.fit_intercept: warm_start_coef = np.append(warm_start_coef, self.intercept_[:, np.newaxis], axis=1) self.coef_ = list() self.intercept_ = np.zeros(n_classes) # Hack so that we iterate only once for the multinomial case. if self.multi_class == 'multinomial': classes_ = [None] warm_start_coef = [warm_start_coef] if warm_start_coef is None: warm_start_coef = [None] * n_classes path_func = delayed(logistic_regression_path) # The SAG solver releases the GIL so it's more efficient to use # threads for this solver. backend = 'threading' if self.solver == 'sag' else 'multiprocessing' fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend=backend)( path_func(X, y, pos_class=class_, Cs=[self.C], fit_intercept=self.fit_intercept, tol=self.tol, verbose=self.verbose, solver=self.solver, copy=False, multi_class=self.multi_class, max_iter=self.max_iter, class_weight=self.class_weight, check_input=False, random_state=self.random_state, coef=warm_start_coef_, max_squared_sum=max_squared_sum, sample_weight=sample_weight) for (class_, warm_start_coef_) in zip(classes_, warm_start_coef)) fold_coefs_, _, n_iter_ = zip(*fold_coefs_) self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0] if self.multi_class == 'multinomial': self.coef_ = fold_coefs_[0][0] else: self.coef_ = np.asarray(fold_coefs_) self.coef_ = self.coef_.reshape(n_classes, n_features + int(self.fit_intercept)) if self.fit_intercept: self.intercept_ = self.coef_[:, -1] self.coef_ = self.coef_[:, :-1] return self def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by the label of classes. For a multi_class problem, if multi_class is set to be "multinomial" the softmax function is used to find the predicted probability of each class. Else use a one-vs-rest approach, i.e calculate the probability of each class assuming it to be positive using the logistic function. and normalize these values across all the classes. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ if not hasattr(self, "coef_"): raise NotFittedError("Call fit before prediction") calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr" if calculate_ovr: return super(LogisticRegression, self)._predict_proba_lr(X) else: return softmax(self.decision_function(X), copy=False) def predict_log_proba(self, X): """Log of probability estimates. The returned estimates for all classes are ordered by the label of classes. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ return np.log(self.predict_proba(X)) class LogisticRegressionCV(LogisticRegression, BaseEstimator, LinearClassifierMixin, _LearntSelectorMixin): """Logistic Regression CV (aka logit, MaxEnt) classifier. This class implements logistic regression using liblinear, newton-cg, sag of lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2 regularization with primal formulation. The liblinear solver supports both L1 and L2 regularization, with a dual formulation only for the L2 penalty. For the grid of Cs values (that are set by default to be ten values in a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is selected by the cross-validator StratifiedKFold, but it can be changed using the cv parameter. In the case of newton-cg and lbfgs solvers, we warm start along the path i.e guess the initial coefficients of the present fit to be the coefficients got after convergence in the previous fit, so it is supposed to be faster for high-dimensional dense data. For a multiclass problem, the hyperparameters for each class are computed using the best scores got by doing a one-vs-rest in parallel across all folds and classes. Hence this is not the true multinomial loss. Read more in the :ref:`User Guide <logistic_regression>`. Parameters ---------- Cs : list of floats | int Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. Like in support vector machines, smaller values specify stronger regularization. fit_intercept : bool, default: True Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. .. versionadded:: 0.17 class_weight == 'balanced' cv : integer or cross-validation generator The default cross-validation generator used is Stratified K-Folds. If an integer is provided, then it is the number of folds used. See the module :mod:`sklearn.model_selection` module for the list of possible cross-validation objects. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. 
The newton-cg and lbfgs solvers support only l2 penalties. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. scoring : callabale Scoring function to use as cross-validation criteria. For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is accuracy_score. solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'} Algorithm to use in the optimization problem. - For small datasets, 'liblinear' is a good choice, whereas 'sag' is faster for large ones. - For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle multinomial loss; 'liblinear' is limited to one-versus-rest schemes. - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty. - 'liblinear' might be slower in LogisticRegressionCV because it does not handle warm-starting. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. tol : float, optional Tolerance for stopping criteria. max_iter : int, optional Maximum number of iterations of the optimization algorithm. n_jobs : int, optional Number of CPU cores used during the cross-validation loop. If given a value of -1, all cores are used. verbose : int For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any positive number for verbosity. refit : bool If set to True, the scores are averaged across all folds, and the coefs and the C that corresponds to the best score is taken, and a final refit is done using these parameters. Otherwise the coefs, intercepts and C that correspond to the best scores across folds are averaged. multi_class : str, {'ovr', 'multinomial'} Multiclass option can be either 'ovr' or 'multinomial'. If the option chosen is 'ovr', then a binary problem is fit for each label. Else the loss minimised is the multinomial loss fit across the entire probability distribution. Works only for the 'newton-cg', 'sag' and 'lbfgs' solver. .. versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. intercept_scaling : float, default 1. Useful only if solver is liblinear. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Attributes ---------- coef_ : array, shape (1, n_features) or (n_classes, n_features) Coefficient of the features in the decision function. `coef_` is of shape (1, n_features) when the given problem is binary. `coef_` is readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape (1,) or (n_classes,) Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True and is of shape(1,) when the problem is binary. 
Cs_ : array Array of C i.e. inverse of regularization parameter values used for cross-validation. coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \ ``(n_folds, len(Cs_), n_features + 1)`` dict with classes as the keys, and the path of coefficients obtained during cross-validating across each fold and then across each Cs after doing an OvR for the corresponding class as values. If the 'multi_class' option is set to 'multinomial', then the coefs_paths are the coefficients corresponding to each class. Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or ``(n_folds, len(Cs_), n_features + 1)`` depending on whether the intercept is fit or not. scores_ : dict dict with classes as the keys, and the values as the grid of scores obtained during cross-validating each fold, after doing an OvR for the corresponding class. If the 'multi_class' option given is 'multinomial' then the same scores are repeated across all classes, since this is the multinomial class. Each dict value has shape (n_folds, len(Cs)) C_ : array, shape (n_classes,) or (n_classes - 1,) Array of C that maps to the best scores across every class. If refit is set to False, then for each class, the best C is the average of the C's that correspond to the best scores for each fold. n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs) Actual number of iterations for all classes, folds and Cs. In the binary or multinomial cases, the first dimension is equal to 1. See also -------- LogisticRegression """ def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False, penalty='l2', scoring=None, solver='lbfgs', tol=1e-4, max_iter=100, class_weight=None, n_jobs=1, verbose=0, refit=True, intercept_scaling=1., multi_class='ovr', random_state=None): self.Cs = Cs self.fit_intercept = fit_intercept self.cv = cv self.dual = dual self.penalty = penalty self.scoring = scoring self.tol = tol self.max_iter = max_iter self.class_weight = class_weight self.n_jobs = n_jobs self.verbose = verbose self.solver = solver self.refit = refit self.intercept_scaling = intercept_scaling self.multi_class = multi_class self.random_state = random_state def fit(self, X, y, sample_weight=None): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- self : object Returns self. """ _check_solver_option(self.solver, self.multi_class, self.penalty, self.dual) if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0: raise ValueError("Maximum number of iteration must be positive;" " got (max_iter=%r)" % self.max_iter) if not isinstance(self.tol, numbers.Number) or self.tol < 0: raise ValueError("Tolerance for stopping criteria must be " "positive; got (tol=%r)" % self.tol) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") if self.solver == 'sag': max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None check_classification_targets(y) if y.ndim == 2 and y.shape[1] == 1: warnings.warn( "A column-vector y was passed when a 1d array was" " expected. 
Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning) y = np.ravel(y) check_consistent_length(X, y) # init cross-validation generator cv = check_cv(self.cv, y, classifier=True) folds = list(cv.split(X, y)) self._enc = LabelEncoder() self._enc.fit(y) labels = self.classes_ = np.unique(y) n_classes = len(labels) if n_classes < 2: raise ValueError("This solver needs samples of at least 2 classes" " in the data, but the data contains only one" " class: %r" % self.classes_[0]) if n_classes == 2: # OvR in case of binary problems is as good as fitting # the higher label n_classes = 1 labels = labels[1:] # We need this hack to iterate only once over labels, in the case of # multi_class = multinomial, without changing the value of the labels. iter_labels = labels if self.multi_class == 'multinomial': iter_labels = [None] if self.class_weight and not(isinstance(self.class_weight, dict) or self.class_weight in ['balanced', 'auto']): # 'auto' is deprecated and will be removed in 0.19 raise ValueError("class_weight provided should be a " "dict or 'balanced'") # compute the class weights for the entire dataset y if self.class_weight in ("auto", "balanced"): classes = np.unique(y) class_weight = compute_class_weight(self.class_weight, classes, y) class_weight = dict(zip(classes, class_weight)) else: class_weight = self.class_weight path_func = delayed(_log_reg_scoring_path) # The SAG solver releases the GIL so it's more efficient to use # threads for this solver. backend = 'threading' if self.solver == 'sag' else 'multiprocessing' fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend=backend)( path_func(X, y, train, test, pos_class=label, Cs=self.Cs, fit_intercept=self.fit_intercept, penalty=self.penalty, dual=self.dual, solver=self.solver, tol=self.tol, max_iter=self.max_iter, verbose=self.verbose, class_weight=class_weight, scoring=self.scoring, multi_class=self.multi_class, intercept_scaling=self.intercept_scaling, random_state=self.random_state, max_squared_sum=max_squared_sum, sample_weight=sample_weight ) for label in iter_labels for train, test in folds) if self.multi_class == 'multinomial': multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_) multi_coefs_paths = np.asarray(multi_coefs_paths) multi_scores = np.asarray(multi_scores) # This is just to maintain API similarity between the ovr and # multinomial option. # Coefs_paths in now n_folds X len(Cs) X n_classes X n_features # we need it to be n_classes X len(Cs) X n_folds X n_features # to be similar to "ovr". coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0) # Multinomial has a true score across all labels. Hence the # shape is n_folds X len(Cs). We need to repeat this score # across all labels for API similarity. scores = np.tile(multi_scores, (n_classes, 1, 1)) self.Cs_ = Cs[0] self.n_iter_ = np.reshape(n_iter_, (1, len(folds), len(self.Cs_))) else: coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_) self.Cs_ = Cs[0] coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds), len(self.Cs_), -1)) self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds), len(self.Cs_))) self.coefs_paths_ = dict(zip(labels, coefs_paths)) scores = np.reshape(scores, (n_classes, len(folds), -1)) self.scores_ = dict(zip(labels, scores)) self.C_ = list() self.coef_ = np.empty((n_classes, X.shape[1])) self.intercept_ = np.zeros(n_classes) # hack to iterate only once for multinomial case. 
if self.multi_class == 'multinomial': scores = multi_scores coefs_paths = multi_coefs_paths for index, label in enumerate(iter_labels): if self.multi_class == 'ovr': scores = self.scores_[label] coefs_paths = self.coefs_paths_[label] if self.refit: best_index = scores.sum(axis=0).argmax() C_ = self.Cs_[best_index] self.C_.append(C_) if self.multi_class == 'multinomial': coef_init = np.mean(coefs_paths[:, best_index, :, :], axis=0) else: coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) w, _, _ = logistic_regression_path( X, y, pos_class=label, Cs=[C_], solver=self.solver, fit_intercept=self.fit_intercept, coef=coef_init, max_iter=self.max_iter, tol=self.tol, penalty=self.penalty, copy=False, class_weight=class_weight, multi_class=self.multi_class, verbose=max(0, self.verbose - 1), random_state=self.random_state, check_input=False, max_squared_sum=max_squared_sum, sample_weight=sample_weight) w = w[0] else: # Take the best scores across every fold and the average of all # coefficients corresponding to the best scores. best_indices = np.argmax(scores, axis=1) w = np.mean([coefs_paths[i][best_indices[i]] for i in range(len(folds))], axis=0) self.C_.append(np.mean(self.Cs_[best_indices])) if self.multi_class == 'multinomial': self.C_ = np.tile(self.C_, n_classes) self.coef_ = w[:, :X.shape[1]] if self.fit_intercept: self.intercept_ = w[:, -1] else: self.coef_[index] = w[: X.shape[1]] if self.fit_intercept: self.intercept_[index] = w[-1] self.C_ = np.asarray(self.C_) return self
bsd-3-clause
RayMick/scikit-learn
examples/neighbors/plot_species_kde.py
282
4059
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`example_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = -9999 + np.zeros(land_mask.shape[0]) Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
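The key ingredient of the example above is a kernel density estimate with the haversine metric over points given in radians. A minimal sketch of that piece alone, on synthetic latitude/longitude data (bandwidth and ranges are arbitrary, scikit-learn assumed installed):

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
latlon_deg = np.column_stack([rng.uniform(-20, 10, 200),     # latitude
                              rng.uniform(-80, -40, 200)])   # longitude
X = np.radians(latlon_deg)          # haversine distances expect radians

kde = KernelDensity(bandwidth=0.04, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree').fit(X)
log_density = kde.score_samples(X[:5])   # log-density at the first few points
print(np.exp(log_density))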
bsd-3-clause
nvoron23/statsmodels
statsmodels/sandbox/examples/try_multiols.py
33
1243
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013

Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""

#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup

data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog

#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)

#This selects only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())

url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1)  #'dept')

#evaluate the relationship between the various parameters with the Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']

#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
                   'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']

#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
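multiOLS lives in the statsmodels sandbox; its core idea is fitting the same right-hand side against every remaining column. A minimal sketch of that idea with plain statsmodels OLS in a loop (this is an illustration, not the sandbox implementation):

import pandas as pd
import statsmodels.api as sm

data = sm.datasets.longley.load_pandas()
df = data.exog.copy()
df['TOTEMP'] = data.endog

design = sm.add_constant(df[['GNP']])          # 'GNP + 1' in formula terms
pvalues = {col: sm.OLS(df[col], design).fit().pvalues['GNP']
           for col in df.columns if col != 'GNP'}
print(pd.Series(pvalues))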
bsd-3-clause
nomadcube/scikit-learn
examples/mixture/plot_gmm_pdf.py
284
1528
""" ============================================= Density Estimation for a mixture of Gaussians ============================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0., -0.7], [3.5, .7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GMM(n_components=2, covariance_type='full') clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20.0, 30.0) y = np.linspace(-20.0, 40.0) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX)[0] Z = Z.reshape(X.shape) CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)) CB = plt.colorbar(CS, shrink=0.8, extend='both') plt.scatter(X_train[:, 0], X_train[:, 1], .8) plt.title('Negative log-likelihood predicted by a GMM') plt.axis('tight') plt.show()
bsd-3-clause
kaichogami/sympy
sympy/physics/quantum/state.py
58
29186
"""Dirac notation for states.""" from __future__ import print_function, division from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt, Tuple) from sympy.core.compatibility import u, range from sympy.printing.pretty.stringpict import stringPict from sympy.physics.quantum.qexpr import QExpr, dispatch_method __all__ = [ 'KetBase', 'BraBase', 'StateBase', 'State', 'Ket', 'Bra', 'TimeDepState', 'TimeDepBra', 'TimeDepKet', 'Wavefunction' ] #----------------------------------------------------------------------------- # States, bras and kets. #----------------------------------------------------------------------------- # ASCII brackets _lbracket = "<" _rbracket = ">" _straight_bracket = "|" # Unicode brackets # MATHEMATICAL ANGLE BRACKETS _lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}") _rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}") # LIGHT VERTICAL BAR _straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}") # Other options for unicode printing of <, > and | for Dirac notation. # LEFT-POINTING ANGLE BRACKET # _lbracket = u"\u2329" # _rbracket = u"\u232A" # LEFT ANGLE BRACKET # _lbracket = u"\u3008" # _rbracket = u"\u3009" # VERTICAL LINE # _straight_bracket = u"\u007C" class StateBase(QExpr): """Abstract base class for general abstract states in quantum mechanics. All other state classes defined will need to inherit from this class. It carries the basic structure for all other states such as dual, _eval_adjoint and label. This is an abstract base class and you should not instantiate it directly, instead use State. """ @classmethod def _operators_to_state(self, ops, **options): """ Returns the eigenstate instance for the passed operators. This method should be overridden in subclasses. It will handle being passed either an Operator instance or set of Operator instances. It should return the corresponding state INSTANCE or simply raise a NotImplementedError. See cartesian.py for an example. """ raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!") def _state_to_operators(self, op_classes, **options): """ Returns the operators which this state instance is an eigenstate of. This method should be overridden in subclasses. It will be called on state instances and be passed the operator classes that we wish to make into instances. The state instance will then transform the classes appropriately, or raise a NotImplementedError if it cannot return operator instances. See cartesian.py for examples, """ raise NotImplementedError( "Cannot map this state to operators. 
Method not implemented!") @property def operators(self): """Return the operator(s) that this state is an eigenstate of""" from .operatorset import state_to_operators # import internally to avoid circular import errors return state_to_operators(self) def _enumerate_state(self, num_states, **options): raise NotImplementedError("Cannot enumerate this state!") def _represent_default_basis(self, **options): return self._represent(basis=self.operators) #------------------------------------------------------------------------- # Dagger/dual #------------------------------------------------------------------------- @property def dual(self): """Return the dual state of this one.""" return self.dual_class()._new_rawargs(self.hilbert_space, *self.args) @classmethod def dual_class(self): """Return the class used to construt the dual.""" raise NotImplementedError( 'dual_class must be implemented in a subclass' ) def _eval_adjoint(self): """Compute the dagger of this state using the dual.""" return self.dual #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _pretty_brackets(self, height, use_unicode=True): # Return pretty printed brackets for the state # Ideally, this could be done by pform.parens but it does not support the angled < and > # Setup for unicode vs ascii if use_unicode: lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \ u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \ u('\N{BOX DRAWINGS LIGHT VERTICAL}') else: lbracket, rbracket = self.lbracket, self.rbracket slash, bslash, vert = '/', '\\', '|' # If height is 1, just return brackets if height == 1: return stringPict(lbracket), stringPict(rbracket) # Make height even height += (height % 2) brackets = [] for bracket in lbracket, rbracket: # Create left bracket if bracket in set([_lbracket, _lbracket_ucode]): bracket_args = [ ' ' * (height//2 - i - 1) + slash for i in range(height // 2)] bracket_args.extend( [ ' ' * i + bslash for i in range(height // 2)]) # Create right bracket elif bracket in set([_rbracket, _rbracket_ucode]): bracket_args = [ ' ' * i + bslash for i in range(height // 2)] bracket_args.extend([ ' ' * ( height//2 - i - 1) + slash for i in range(height // 2)]) # Create straight bracket elif bracket in set([_straight_bracket, _straight_bracket_ucode]): bracket_args = [vert for i in range(height)] else: raise ValueError(bracket) brackets.append( stringPict('\n'.join(bracket_args), baseline=height//2)) return brackets def _sympystr(self, printer, *args): contents = self._print_contents(printer, *args) return '%s%s%s' % (self.lbracket, contents, self.rbracket) def _pretty(self, printer, *args): from sympy.printing.pretty.stringpict import prettyForm # Get brackets pform = self._print_contents_pretty(printer, *args) lbracket, rbracket = self._pretty_brackets( pform.height(), printer._use_unicode) # Put together state pform = prettyForm(*pform.left(lbracket)) pform = prettyForm(*pform.right(rbracket)) return pform def _latex(self, printer, *args): contents = self._print_contents_latex(printer, *args) # The extra {} brackets are needed to get matplotlib's latex # rendered to render this properly. return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex) class KetBase(StateBase): """Base class for Kets. This class defines the dual property and the brackets for printing. 
This is an abstract base class and you should not instantiate it directly, instead use Ket. """ lbracket = _straight_bracket rbracket = _rbracket lbracket_ucode = _straight_bracket_ucode rbracket_ucode = _rbracket_ucode lbracket_latex = r'\left|' rbracket_latex = r'\right\rangle ' @classmethod def default_args(self): return ("psi",) @classmethod def dual_class(self): return BraBase def __mul__(self, other): """KetBase*other""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, BraBase): return OuterProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*KetBase""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, BraBase): return InnerProduct(other, self) else: return Expr.__rmul__(self, other) #------------------------------------------------------------------------- # _eval_* methods #------------------------------------------------------------------------- def _eval_innerproduct(self, bra, **hints): """Evaluate the inner product betweeen this ket and a bra. This is called to compute <bra|ket>, where the ket is ``self``. This method will dispatch to sub-methods having the format:: ``def _eval_innerproduct_BraClass(self, **hints):`` Subclasses should define these methods (one for each BraClass) to teach the ket how to take inner products with bras. """ return dispatch_method(self, '_eval_innerproduct', bra, **hints) def _apply_operator(self, op, **options): """Apply an Operator to this Ket. This method will dispatch to methods having the format:: ``def _apply_operator_OperatorName(op, **options):`` Subclasses should define these methods (one for each OperatorName) to teach the Ket how operators act on it. Parameters ========== op : Operator The Operator that is acting on the Ket. options : dict A dict of key/value pairs that control how the operator is applied to the Ket. """ return dispatch_method(self, '_apply_operator', op, **options) class BraBase(StateBase): """Base class for Bras. This class defines the dual property and the brackets for printing. This is an abstract base class and you should not instantiate it directly, instead use Bra. 
""" lbracket = _lbracket rbracket = _straight_bracket lbracket_ucode = _lbracket_ucode rbracket_ucode = _straight_bracket_ucode lbracket_latex = r'\left\langle ' rbracket_latex = r'\right|' @classmethod def _operators_to_state(self, ops, **options): state = self.dual_class().operators_to_state(ops, **options) return state.dual def _state_to_operators(self, op_classes, **options): return self.dual._state_to_operators(op_classes, **options) def _enumerate_state(self, num_states, **options): dual_states = self.dual._enumerate_state(num_states, **options) return [x.dual for x in dual_states] @classmethod def default_args(self): return self.dual_class().default_args() @classmethod def dual_class(self): return KetBase def __mul__(self, other): """BraBase*other""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, KetBase): return InnerProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*BraBase""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, KetBase): return OuterProduct(other, self) else: return Expr.__rmul__(self, other) def _represent(self, **options): """A default represent that uses the Ket's version.""" from sympy.physics.quantum.dagger import Dagger return Dagger(self.dual._represent(**options)) class State(StateBase): """General abstract quantum state used as a base class for Ket and Bra.""" pass class Ket(State, KetBase): """A general time-independent Ket in quantum mechanics. Inherits from State and KetBase. This class should be used as the base class for all physical, time-independent Kets in a system. This class and its subclasses will be the main classes that users will use for expressing Kets in Dirac notation [1]_. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. Examples ======== Create a simple Ket and looking at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> k = Ket('psi') >>> k |psi> >>> k.hilbert_space H >>> k.is_commutative False >>> k.label (psi,) Ket's know about their associated bra:: >>> k.dual <psi| >>> k.dual_class() <class 'sympy.physics.quantum.state.Bra'> Take a linear combination of two kets:: >>> k0 = Ket(0) >>> k1 = Ket(1) >>> 2*I*k0 - 4*k1 2*I*|0> - 4*|1> Compound labels are passed as tuples:: >>> n, m = symbols('n,m') >>> k = Ket(n,m) >>> k |nm> References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Bra class Bra(State, BraBase): """A general time-independent Bra in quantum mechanics. Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This class and its subclasses will be the main classes that users will use for expressing Bras in Dirac notation. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. 
Examples ======== Create a simple Bra and look at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> b = Bra('psi') >>> b <psi| >>> b.hilbert_space H >>> b.is_commutative False Bra's know about their dual Ket's:: >>> b.dual |psi> >>> b.dual_class() <class 'sympy.physics.quantum.state.Ket'> Like Kets, Bras can have compound labels and be manipulated in a similar manner:: >>> n, m = symbols('n,m') >>> b = Bra(n,m) - I*Bra(m,n) >>> b -I*<mn| + <nm| Symbols in a Bra can be substituted using ``.subs``:: >>> b.subs(n,m) <mm| - I*<mm| References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Ket #----------------------------------------------------------------------------- # Time dependent states, bras and kets. #----------------------------------------------------------------------------- class TimeDepState(StateBase): """Base class for a general time-dependent quantum state. This class is used as a base class for any time-dependent state. The main difference between this class and the time-independent state is that this class takes a second argument that is the time in addition to the usual label argument. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. """ #------------------------------------------------------------------------- # Initialization #------------------------------------------------------------------------- @classmethod def default_args(self): return ("psi", "t") #------------------------------------------------------------------------- # Properties #------------------------------------------------------------------------- @property def label(self): """The label of the state.""" return self.args[:-1] @property def time(self): """The time of the state.""" return self.args[-1] #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _print_time(self, printer, *args): return printer._print(self.time, *args) _print_time_repr = _print_time _print_time_latex = _print_time def _print_time_pretty(self, printer, *args): pform = printer._print(self.time, *args) return pform def _print_contents(self, printer, *args): label = self._print_label(printer, *args) time = self._print_time(printer, *args) return '%s;%s' % (label, time) def _print_label_repr(self, printer, *args): label = self._print_sequence(self.label, ',', printer, *args) time = self._print_time_repr(printer, *args) return '%s,%s' % (label, time) def _print_contents_pretty(self, printer, *args): label = self._print_label_pretty(printer, *args) time = self._print_time_pretty(printer, *args) return printer._print_seq((label, time), delimiter=';') def _print_contents_latex(self, printer, *args): label = self._print_sequence( self.label, self._label_separator, printer, *args) time = self._print_time_latex(printer, *args) return '%s;%s' % (label, time) class TimeDepKet(TimeDepState, KetBase): """General time-dependent Ket in quantum mechanics. This inherits from ``TimeDepState`` and ``KetBase`` and is the main class that should be used for Kets that vary with time. Its dual is a ``TimeDepBra``. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. 
This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== Create a TimeDepKet and look at its attributes:: >>> from sympy.physics.quantum import TimeDepKet >>> k = TimeDepKet('psi', 't') >>> k |psi;t> >>> k.time t >>> k.label (psi,) >>> k.hilbert_space H TimeDepKets know about their dual bra:: >>> k.dual <psi;t| >>> k.dual_class() <class 'sympy.physics.quantum.state.TimeDepBra'> """ @classmethod def dual_class(self): return TimeDepBra class TimeDepBra(TimeDepState, BraBase): """General time-dependent Bra in quantum mechanics. This inherits from TimeDepState and BraBase and is the main class that should be used for Bras that vary with time. Its dual is a TimeDepBra. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== >>> from sympy.physics.quantum import TimeDepBra >>> from sympy import symbols, I >>> b = TimeDepBra('psi', 't') >>> b <psi;t| >>> b.time t >>> b.label (psi,) >>> b.hilbert_space H >>> b.dual |psi;t> """ @classmethod def dual_class(self): return TimeDepKet class Wavefunction(Function): """Class for representations in continuous bases This class takes an expression and coordinates in its constructor. It can be used to easily calculate normalizations and probabilities. Parameters ========== expr : Expr The expression representing the functional form of the w.f. coords : Symbol or tuple The coordinates to be integrated over, and their bounds Examples ======== Particle in a box, specifying bounds in the more primitive way of using Piecewise: >>> from sympy import Symbol, Piecewise, pi, N >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x = Symbol('x', real=True) >>> n = 1 >>> L = 1 >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) >>> f = Wavefunction(g, x) >>> f.norm 1 >>> f.is_normalized True >>> p = f.prob() >>> p(0) 0 >>> p(L) 0 >>> p(0.5) 2 >>> p(0.85*L) 2*sin(0.85*pi)**2 >>> N(p(0.85*L)) 0.412214747707527 Additionally, you can specify the bounds of the function and the indices in a more compact way: >>> from sympy import symbols, pi, diff >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True, positive=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> f(L+1) 0 >>> f(L-1) sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L) >>> f(-1) 0 >>> f(0.85) sqrt(2)*sin(0.85*pi*n/L)/sqrt(L) >>> f(0.85, n=1, L=1) sqrt(2)*sin(0.85*pi) >>> f.is_commutative False All arguments are automatically sympified, so you can define the variables as strings rather than symbols: >>> expr = x**2 >>> f = Wavefunction(expr, 'x') >>> type(f.variables[0]) <class 'sympy.core.symbol.Symbol'> Derivatives of Wavefunctions will return Wavefunctions: >>> diff(f, x) Wavefunction(2*x, x) """ #Any passed tuples for coordinates and their bounds need to be #converted to Tuples before Function's constructor is called, to #avoid errors from calling is_Float in the constructor def __new__(cls, *args, **options): new_args = [None for i in args] ct = 0 for arg in args: if isinstance(arg, tuple): new_args[ct] = Tuple(*arg) else: new_args[ct] = arg ct += 1 return super(Function, cls).__new__(cls, *new_args, **options) def 
__call__(self, *args, **options): var = self.variables if len(args) != len(var): raise NotImplementedError( "Incorrect number of arguments to function!") ct = 0 #If the passed value is outside the specified bounds, return 0 for v in var: lower, upper = self.limits[v] #Do the comparison to limits only if the passed symbol is actually #a symbol present in the limits; #Had problems with a comparison of x > L if isinstance(args[ct], Expr) and \ not (lower in args[ct].free_symbols or upper in args[ct].free_symbols): continue if (args[ct] < lower) == True or (args[ct] > upper) == True: return 0 ct += 1 expr = self.expr #Allows user to make a call like f(2, 4, m=1, n=1) for symbol in list(expr.free_symbols): if str(symbol) in options.keys(): val = options[str(symbol)] expr = expr.subs(symbol, val) return expr.subs(zip(var, args)) def _eval_derivative(self, symbol): expr = self.expr deriv = expr._eval_derivative(symbol) return Wavefunction(deriv, *self.args[1:]) def _eval_conjugate(self): return Wavefunction(conjugate(self.expr), *self.args[1:]) def _eval_transpose(self): return self @property def free_symbols(self): return self.expr.free_symbols @property def is_commutative(self): """ Override Function's is_commutative so that order is preserved in represented expressions """ return False @classmethod def eval(self, *args): return None @property def variables(self): """ Return the coordinates which the wavefunction depends on Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x,y = symbols('x,y') >>> f = Wavefunction(x*y, x, y) >>> f.variables (x, y) >>> g = Wavefunction(x*y, x) >>> g.variables (x,) """ var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]] return tuple(var) @property def limits(self): """ Return the limits of the coordinates which the w.f. depends on If no limits are specified, defaults to ``(-oo, oo)``. Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, (x, 0, 1)) >>> f.limits {x: (0, 1)} >>> f = Wavefunction(x**2, x) >>> f.limits {x: (-oo, oo)} >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2)) >>> f.limits {x: (-oo, oo), y: (-1, 2)} """ limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo) for g in self._args[1:]] return dict(zip(self.variables, tuple(limits))) @property def expr(self): """ Return the expression which is the functional form of the Wavefunction Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, x) >>> f.expr x**2 """ return self._args[0] @property def is_normalized(self): """ Returns true if the Wavefunction is properly normalized Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True, positive=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.is_normalized True """ return (self.norm == 1.0) @property @cacheit def norm(self): """ Return the normalization of the specified functional form. This function integrates over the coordinates of the Wavefunction, with the bounds specified. 
Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True, positive=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm sqrt(2)*sqrt(L)/2 """ exp = self.expr*conjugate(self.expr) var = self.variables limits = self.limits for v in var: curr_limits = limits[v] exp = integrate(exp, (v, curr_limits[0], curr_limits[1])) return sqrt(exp) def normalize(self): """ Return a normalized version of the Wavefunction Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x = symbols('x', real=True) >>> L = symbols('L', positive=True) >>> n = symbols('n', integer=True, positive=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.normalize() Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L)) """ const = self.norm if const == oo: raise NotImplementedError("The function is not normalizable!") else: return Wavefunction((const)**(-1)*self.expr, *self.args[1:]) def prob(self): """ Return the absolute magnitude of the w.f., `|\psi(x)|^2` Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', real=True) >>> n = symbols('n', integer=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.prob() Wavefunction(sin(pi*n*x/L)**2, x) """ return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
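A short usage sketch of the classes defined above, mirroring the docstring examples (a Ket with its dual bra, and a normalized particle-in-a-box Wavefunction):

from sympy import Symbol, pi, sin, sqrt
from sympy.physics.quantum import Ket, Bra
from sympy.physics.quantum.state import Wavefunction

k = Ket('psi')
print(k, k.dual)                 # |psi>  <psi|

x = Symbol('x', positive=True)
L = Symbol('L', positive=True)
n = Symbol('n', integer=True, positive=True)
f = Wavefunction(sqrt(2/L)*sin(n*pi*x/L), (x, 0, L))
print(f.norm, f.is_normalized)   # 1  True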
bsd-3-clause
jrleja/bsfh
misc/timings_pyfsps.py
3
4274
#compare a lookup table of spectra at ages and metallicities to #calls to fsps.sps.get_spectrum() for different metallicities import time, os, subprocess, re, sys import numpy as np #import matplotlib.pyplot as pl import fsps from prospect import sources as sps_basis from prospect.models import sedmodel def run_command(cmd): """ Open a child process, and return its exit status and stdout. """ child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE, stdout=subprocess.PIPE) out = [s for s in child.stdout] w = child.wait() return os.WEXITSTATUS(w), out # Check to make sure that the required environment variable is present. try: ev = os.environ["SPS_HOME"] except KeyError: raise ImportError("You need to have the SPS_HOME environment variable") # Check the SVN revision number. cmd = ["svnversion", ev] stat, out = run_command(" ".join(cmd)) fsps_vers = int(re.match("^([0-9])+", out[0]).group(0)) sps = fsps.StellarPopulation(zcontinuous=True) print('FSPS version = {}'.format(fsps_vers)) print('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths))) print('single age') def spec_from_fsps(z, t, s): t0 = time.time() sps.params['logzsol'] = z sps.params['sigma_smooth'] = s sps.params['tage'] = t wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage']) #print(spec.shape) return time.time()-t0 def mags_from_fsps(z, t, s): t0 = time.time() sps.params['zred']=t sps.params['logzsol'] = z sps.params['sigma_smooth'] = s sps.params['tage'] = t mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0) #print(spec.shape) return time.time()-t0 def spec_from_ztinterp(z, t, s): t0 = time.time() sps.params['logzsol'] = z sps.params['sigma_smooth'] = s sps.params['tage'] = t sps.params['imf3'] = s spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True) #print(spec.shape) return time.time()-t0 if sys.argv[1] == 'mags': from_fsps = mags_from_fsps print('timing get_mags') print('nbands = {}'.format(len(sps.get_mags(tage=1.0)))) elif sys.argv[1] == 'spec': from_fsps = spec_from_fsps print('timing get_spectrum') elif sys.argv[1] == 'ztinterp': from_fsps = spec_from_ztinterp print('timing get_spectrum') elif sys.argv[1] == 'sedpy': from sedpy import observate nbands = len(sps.get_mags(tage=1.0)) fnames = nbands * ['sdss_r0'] filters = observate.load_filters(fnames) def mags_from_sedpy(z, t, s): t0 = time.time() sps.params['logzsol'] = z sps.params['sigma_smooth'] = s sps.params['tage'] = t wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage']) mags = observate.getSED(wave, spec, filters) return time.time()-t0 from_fsps = mags_from_sedpy sps.params['add_neb_emission'] = False sps.params['smooth_velocity'] = True sps.params['sfh'] = 0 ntry = 30 zz = np.random.uniform(-1,0,ntry) tt = np.random.uniform(0.1,4,ntry) ss = np.random.uniform(1,2.5,ntry) #make sure all z's already compiled _ =[from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]] all_dur = [] print('no neb emission:') dur_many = np.zeros(ntry) for i in xrange(ntry): dur_many[i] = from_fsps(zz[i], tt[i], ss[i]) print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std())) all_dur += [dur_many] print('no neb emission, no smooth:') dur_many = np.zeros(ntry) for i in xrange(ntry): dur_many[i] = from_fsps(zz[i], tt[i], 0.0) print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std())) all_dur += [dur_many] sps.params['add_neb_emission'] = True print('neb emission:') dur_many = np.zeros(ntry) for i in xrange(ntry): dur_many[i] = 
from_fsps(zz[i], tt[i], ss[i]) print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std())) all_dur += [dur_many] print('neb emission, no smooth:') dur_many = np.zeros(ntry) for i in xrange(ntry): dur_many[i] = from_fsps(zz[i], tt[i], 0.0) print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std())) all_dur += [dur_many]
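The benchmark above repeats a call with random parameters and reports the mean and standard deviation per call. A minimal generic sketch of that harness (the function timed here is only a placeholder, not an FSPS call):

import time
import numpy as np

def time_calls(fn, param_list):
    durations = np.empty(len(param_list))
    for i, params in enumerate(param_list):
        t0 = time.time()
        fn(*params)
        durations[i] = time.time() - t0
    return durations

zz = np.random.uniform(-1, 0, 30)
tt = np.random.uniform(0.1, 4, 30)
dur = time_calls(lambda z, t: sum(range(100000)), list(zip(zz, tt)))
print('<t/call>={0}s, sigma_t={1}s'.format(dur.mean(), dur.std()))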
mit
allenlavoie/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
28
5024
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Methods to allow pandas.DataFrame (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn from tensorflow.python.util.deprecation import deprecated try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False PANDAS_DTYPES = { 'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int', 'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int', 'float16': 'float', 'float32': 'float', 'float64': 'float', 'bool': 'i' } @deprecated(None, 'Please use tf.estimator.inputs.pandas_input_fn') def pandas_input_fn(x, y=None, batch_size=128, num_epochs=1, shuffle=True, queue_capacity=1000, num_threads=1, target_column='target'): """This input_fn diffs from the core version with default `shuffle`.""" return core_pandas_input_fn(x=x, y=y, batch_size=batch_size, shuffle=shuffle, num_epochs=num_epochs, queue_capacity=queue_capacity, num_threads=num_threads, target_column=target_column) @deprecated(None, 'Please access pandas data directly.') def extract_pandas_data(data): """Extract data from pandas.DataFrame for predictors. Given a DataFrame, will extract the values and cast them to float. The DataFrame is expected to contain values of type int, float or bool. Args: data: `pandas.DataFrame` containing the data to be extracted. Returns: A numpy `ndarray` of the DataFrame's values as floats. Raises: ValueError: if data contains types other than int, float or bool. """ if not isinstance(data, pd.DataFrame): return data bad_data = [column for column in data if data[column].dtype.name not in PANDAS_DTYPES] if not bad_data: return data.values.astype('float') else: error_report = [("'" + str(column) + "' type='" + data[column].dtype.name + "'") for column in bad_data] raise ValueError('Data types for extracting pandas data must be int, ' 'float, or bool. Found: ' + ', '.join(error_report)) @deprecated(None, 'Please access pandas data directly.') def extract_pandas_matrix(data): """Extracts numpy matrix from pandas DataFrame. Args: data: `pandas.DataFrame` containing the data to be extracted. Returns: A numpy `ndarray` of the DataFrame's values. """ if not isinstance(data, pd.DataFrame): return data return data.as_matrix() @deprecated(None, 'Please access pandas data directly.') def extract_pandas_labels(labels): """Extract data from pandas.DataFrame for labels. 
Args: labels: `pandas.DataFrame` or `pandas.Series` containing one column of labels to be extracted. Returns: A numpy `ndarray` of labels from the DataFrame. Raises: ValueError: if more than one column is found or type is not int, float or bool. """ if isinstance(labels, pd.DataFrame): # pandas.Series also belongs to DataFrame if len(labels.columns) > 1: raise ValueError('Only one column for labels is allowed.') bad_data = [column for column in labels if labels[column].dtype.name not in PANDAS_DTYPES] if not bad_data: return labels.values else: error_report = ["'" + str(column) + "' type=" + str(labels[column].dtype.name) for column in bad_data] raise ValueError('Data types for extracting labels must be int, ' 'float, or bool. Found: ' + ', '.join(error_report)) else: return labels
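A small usage sketch of the helpers defined above, assuming it is run in the same module (or with them imported) and that pandas is available; int, float and bool columns are accepted and cast to float, anything else raises ValueError:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5], 'c': [True, False, True]})
print(extract_pandas_data(df))        # 3x3 float ndarray

labels = pd.DataFrame({'target': [0, 1, 1]})
print(extract_pandas_labels(labels))  # values from the single label column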
apache-2.0
Loisel/tmr3
tmr.py
1
15096
#!/usr/bin/python """ A module to calculate the current, the conductance and the TMR from a set of rate arrays. The rate arrays are supposed to be stored in a h5 file in the job directory. The result is stored in a h5 file. The name of the dataset contains all parameters. They are also stored as attributes in the dataset. The conductance in the two lead configurations (parallel/anti-parallel) are stored in arrays in the dataset. Usage: ./tmr.py <jobname> """ import numpy as np from numpy import linalg import time import sys import getopt import h5py import os # We are picky about possible floating point overflows # to avoid calculating NaNs np.seterr(divide="raise") np.seterr(invalid="raise") # A helper module to calculate the populations. import pop # The configuration module import cfg # path to the dat directory datpath = "dat/" # name of the temporary file where the rates are stored ratefile = "running_calc.h5" # name of the h5 file to store the conductance for the two configuration # and the configuraion parameters. hdffile = "simdata_new.h5" def save_hdf5(fname,G_P,G_AP): """ Store the conductance and the configuration to the h5 file. Args: fname: filename of the h5 file G_P: the conductance for leads with parallel magnetization G_AP: the conductance for leads with anti-parallel magnetization """ print "Shape of GP {}".format(G_P.shape) fileh = h5py.File(fname,"a") # Note that the selection of parameters to construct the name of the # dataset should be chosen such that this string is unique! # That is, it should contain all running parameters. dset_name = "G={}_kbT={}_Ec={}_E0={}_Pol={}_PolOrb={}_SO={}_tau={}_DS={}_B_P={}_B_AP={}_B_ORB_P={}_B_ORB_AP={}_W_e={}_W_0={}".format(cfg.conf['G_scale'],cfg.conf['kBT'],cfg.conf['E_C'],cfg.conf['E_0'],cfg.conf['Pol'],cfg.conf['OrbPol'],cfg.conf['SO'],cfg.conf['tau_r'],cfg.conf['D_S_factor'],cfg.conf['B_P'],cfg.conf['B_AP'],cfg.conf['B_ORB_P'],cfg.conf['B_ORB_AP'],cfg.conf['W_E'],cfg.conf['W_0']) try: # we create the dataset dset = fileh.create_dataset(dset_name,data=np.vstack((G_P,G_AP))) # and store the config attributes dset.attrs['alpha'] = cfg.conf['ALPHA'] dset.attrs['temperature'] = cfg.conf['kBT'] dset.attrs['coupling'] = cfg.conf['G_scale'] dset.attrs['electron_number'] = cfg.conf['N_0'] dset.attrs['charging_energy'] = cfg.conf['E_C'] dset.attrs['level_spacing'] = cfg.conf['E_0'] dset.attrs['polarization_spin'] = cfg.conf['Pol'] dset.attrs['polarization_orbit'] = cfg.conf['OrbPol'] dset.attrs['spinorbit'] = cfg.conf['SO'] dset.attrs['stonershift'] = cfg.conf['D_S_factor'] dset.attrs['tau_r'] = cfg.conf['tau_r'] dset.attrs['vg_min'] = cfg.conf['V_g_min'] dset.attrs['vg_max'] = cfg.conf['V_g_max'] dset.attrs['b_p'] = cfg.conf['B_P'] dset.attrs['b_ap'] = cfg.conf['B_AP'] dset.attrs['b_orb_p'] = cfg.conf['B_ORB_P'] dset.attrs['b_orb_ap'] = cfg.conf['B_ORB_AP'] dset.attrs['w_0'] = cfg.conf['W_0'] dset.attrs['w_e'] = cfg.conf['W_E'] dset.attrs['timestamp'] = time.time() except KeyError: # If the choice was not unique we complain but continue. print "Dataset exists." fileh.close() def eval_DENKER(GM,GP,configuration): """ Evaluate the density matrix kernel using the in- and out-tunneling rates. Args: GM,GP: numpy arrays containing in- and out-tunneling rates in the order of cfg.TLIST. configuration: integer determining parallel (0) or anti-parallel(1) configuration Returns: the density matrix as a square 2-d numpy array that is NP**2 in size, where NP is the number of states in the groundstatespace. 
""" # we get a view on the transition list and, for simplicity, its transpose TLIST = cfg.TLIST[configuration] TLIST_T = np.transpose(TLIST) # from all transitions we extract all groundstates in the statespace # this is probably a complicated way to do it PLIST = list(set(TLIST_T[0]).union(TLIST_T[1])) # ... and sort it by index PLIST.sort() # the number of groundstates NP = len(PLIST) # let's create an empty density matrix ME = np.zeros((NP,NP)) # we create a version of the transition list that does not contain # the indices in terms of the energy array (see cfg.py), but # in terms of the number in the state list (plist) # (the transition list can then be used to denote non-zero matrix elements) TMP = np.copy(TLIST) for idx,val in enumerate(PLIST): TMP[TLIST == val] = idx # We calculate diagonal elements of the density matrix: # TLIST_T[1] == num selects the correct in-tunneling rates for the # state with label num # have a look at numpy.where to understand this line for idx,num in enumerate(PLIST): ME[idx,idx] = -np.sum(np.where(TLIST_T[1] == num,GP,0.)) - np.sum(np.where(TLIST_T[0] == num,GM,0.)) # for the off diagonal elements we can directly use the generated TMP # transition list for k,tup in enumerate(TMP): ME[tup[0],tup[1]] = GP[k] ME[tup[1],tup[0]] = GM[k] # print "tup: {} and matrix element {}".format(tup,ME[tuple(tup)]) return ME def eval_CURKER(GM,GP,configuration): """ Evaluate the current kernel using the in- and out-tunneling rates. Args: GM,GP: numpy arrays containing in- and out-tunneling rates in the order of cfg.TLIST. configuration: integer determining parallel (0) or anti-parallel(1) configuration Returns: the current kernel as a 1-d numpy array. """ # We get a view on the transition list and its transpose TLIST = cfg.TLIST[configuration] TLIST_T = np.transpose(TLIST) # ... and extract the list of groundstates (see also eval_DENKER) PLIST = list(set(TLIST_T[0]).union(TLIST_T[1])) PLIST.sort() # this determines the size of the statespace NP = len(PLIST) CUR = np.zeros(NP) # Note that the current kernel can be calculated by summing the diagonal elements # of the density matrix with opposite sign # compare eval_DENKER for idx,num in enumerate(PLIST): CUR[idx] = np.sum(np.where(TLIST_T[1] == num,GP,0.)) - np.sum(np.where(TLIST_T[0] == num,GM,0.)) return CUR def current(GP,GM,POP,configuration): """ Calculate the current using the rates and populations. Args: GP, GM: np-arrays containing in- and out-tunneling rates. POP: np-array for the populations configuration: integer determining parallel (0) or anti-parallel(1) configuration Returns: current as a float. """ # We calculate the current kernel CURKER = eval_CURKER(GM,GP,configuration) # and vector-multiply it with the population vector I = -np.sum(cfg.conf["ELE"]*np.dot( CURKER, POP)) return I def eval_tmr(fname,plotname,pop): """ Calculates the TMR by evaluating conductance through parallel and anti-parallel polarized contacts. Args: fname: the h5 file to load the rates from. plotname: A name for the pdf output to produce. pop: If True, we plot the populations, too. 
""" # We prepare the current and conductance vectors for different # values of gate and bias voltage C_p = np.zeros((cfg.conf['NV'],cfg.conf['NVb'])) C_ap = np.zeros((cfg.conf['NV'],cfg.conf['NVb'])) G_p = np.zeros((cfg.conf['NV'],cfg.conf['NVb']-1)) G_ap = np.zeros((cfg.conf['NV'],cfg.conf['NVb']-1)) dVb = cfg.conf['Vb_range'][1]- cfg.conf['Vb_range'][0] # the population vectors, for all values of gate and bias POP_p = np.zeros((cfg.conf['NVb'],cfg.conf['NV'],cfg.N_GS[0])) POP_ap = np.zeros((cfg.conf['NVb'],cfg.conf['NV'],cfg.N_GS[1])) # We iterate over two bias values first for nV,Vb in enumerate(cfg.conf["Vb_range"]): # now the rates are loaded from the h5 file # note that the label of the specific rate arrays are fixed with h5py.File(fname) as file: GP0_p = np.array(file['par_P0_V{}'.format(Vb)]) GP0_ap = np.array(file['apa_P0_V{}'.format(Vb)]) GP1_p = np.array(file['par_P1_V{}'.format(Vb)]) GP1_ap = np.array(file['apa_P1_V{}'.format(Vb)]) GM0_p = np.array(file['par_M0_V{}'.format(Vb)]) GM0_ap = np.array(file['apa_M0_V{}'.format(Vb)]) GM1_p = np.array(file['par_M1_V{}'.format(Vb)]) GM1_ap = np.array(file['apa_M1_V{}'.format(Vb)]) # for the density kernel, we sum all rates over both leads DENKER_p = np.array([eval_DENKER(GM0_p[n]+GM1_p[n],GP0_p[n]+GP1_p[n],0)for n in range(cfg.conf["NV"])]) DENKER_ap = np.array([eval_DENKER(GM0_ap[n]+GM1_ap[n],GP0_ap[n]+GP1_ap[n],1)for n in range(cfg.conf["NV"])]) # the populations are calculated from the density kernel by an asymptotic # approximation scheme POP_ap[nV] = np.array([pop.asymptotic_ssp(DENKER_ap[n]) for n in range(cfg.conf["NV"])]) POP_p[nV] = np.array([pop.asymptotic_ssp(DENKER_p[n]) for n in range(cfg.conf["NV"])]) # note that the current is calculated from the rates in one of the leads only C_p[:,nV] = np.array([ current(GP0_p[n],GM0_p[n],POP_p[nV,n],0) for n in np.arange(cfg.conf["NV"]) ]) C_ap[:,nV] = np.array([ current(GP0_ap[n],GM0_ap[n],POP_ap[nV,n],1) for n in np.arange(cfg.conf["NV"]) ]) # the numerical derivative gives the conductance G_p = np.diff(C_p).flatten()/dVb G_ap = np.diff(C_ap).flatten()/dVb # we save the conductance traces to a h5 file specified as a global variable # hdffile in the path datpath # It is possible that the dataset already exists. In this case, we issue a warning. try: save_hdf5("{}{}".format(datpath,hdffile),G_p,G_ap) except RuntimeError: print "Unable to save to {}, maybe there is already a dataset with similar parameters...".format(hdffile) # the tmr and conductance graphs are plotted to a pdf file for review. plot_tmr_pdf(G_p,G_ap,plotname) # if the pop flag is set, we also plot the population for one bias value if pop: plot_population([POP_p[0],POP_ap[0]],os.path.splitext(plotname)[0]+"_POP.pdf") def plot_tmr_pdf(C_p,C_ap,fname): """ A helper routine to plot the conductance and TMR to a pdf file in the datpath. Args: C_p, C_ap: the parallel and anti-parallel conductance. 
fname: the filename to plot to """ import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt # we plot the conductance graph on top, p and ap with different colors Axes1 = plt.subplot(2,1,1) Axes1.set_xticklabels([]) plt.ylabel("Conductance (e^2/h)") plt.title("Conductance at zero bias") # parallel is plotted in red, and anti-parallel as blue dashed line plt.plot( cfg.conf["V_g"],C_p,'r',cfg.conf["V_g"], C_ap, 'b--') # on the second panel, the TMR is plotted Axes2 = plt.subplot(2,1,2) plt.xlabel("gate voltage (V)") plt.ylabel("TMR") plt.title("TMR") plt.ylim((-0.3,1.5)) TMR = np.zeros(cfg.conf["NV"]) for i in range(cfg.conf["NV"]): try: TMR[i] = C_p[i]/C_ap[i]-1. except ZeroDivisionError: print "Zero Division, returning null." TMR[i] = 0. plt.plot( cfg.conf["V_g"], TMR) plt.savefig(fname, bbox_inches='tight') def plot_population(POP, fname): """ Calculates and plots selected populations of the quantum dot with gate voltage. The edge states N=-1 and 5 are neglected. Args: POP: a list with the two population vectors for parallel and anti-parallel configurations fname: the filename to plot to """ import matplotlib.pyplot as plt NV = cfg.conf["NV"] print "Calculating populations..." # We plot the populations for both configurations # the parallel populations on top # the anti-parallel on bottom Ax = [plt.subplot(2,1,1),plt.subplot(2,1,2)] cm = plt.get_cmap('gist_rainbow') PopPlots = [1,4,8,12,17,18] NP = len(PopPlots) for gamidx in range(2): TLIST = cfg.TLIST[gamidx] TLIST_T = np.transpose(TLIST) PLIST = list(set(TLIST_T[0]).union(TLIST_T[1])) PLIST.sort() # we cycle through the linecolors to distinguish the different # groundstates Ax[gamidx].set_color_cycle([cm(1.*k/NP) for k in range(NP)]) for i in PopPlots: color = cm(1.*i/NP) LABEL = "P_{}".format(cfg.int_to_state(PLIST[i])) Ax[gamidx].plot( cfg.conf["V_g"], POP[gamidx][:,i],label=LABEL) lines =Ax[gamidx].get_lines() labels = [l.get_label() for l in lines] leg = plt.figlegend(lines,labels,loc='upper right') plt.savefig(fname) plt.show() class Usage(Exception): def __init__(self, msg): self.msg = msg def main(argv=None): """ Interface routine to call the tmr module. Example: ./tmr.py <jobname> In principle, there were routines to plot rates, populations, conductances etc. but apart from the population plotting, none of the use cases was needed anymore. """ POP = False # The default config file is called cnt.conf cfile = "cnt.conf" rlist = [0.,] if argv is None: argv = sys.argv try: try: opts, args = getopt.getopt(argv[1:], "hc:P", ["help","config=","pop"]) except getopt.error, msg: raise Usage(msg) for o,a in opts: if o in ('-h','--help'): usage() exit() elif o in ('-c','--config'): cfile = a elif o in ('-P','--pop'): POP = True else: raise Usage('Invalid argument.') # we parse the config and initialize it cfg.parse_conf("dat/{0}/{1}".format(args[0],cfile)) cfg.init() h5file = "{}{}/{}".format(datpath,args[0],ratefile) pdffile = "{}{}.pdf".format(datpath,args[0]) print "Try to open {}".format(h5file) eval_tmr(h5file,pdffile,POP) except Usage, err: print >>sys.stderr, err.msg print >>sys.stderr, "for help use --help" return 2 def usage(): print "This is a tool to process rate files.\n\ \n\ usage: tmr.py [-hP] [--pop] jobname\n\ \n\ --pop or -P: Plot the populations.\n\ \n\ jobname: The name of the directory for the rate files.\n\ \n\ The script searches for files dat/jobname/running_calc.h5\n\ and dat/jobname/cnt.conf" if __name__ == "__main__": sys.exit(main())
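pop.asymptotic_ssp is project-specific, but a common way to obtain stationary populations from a rate matrix whose columns sum to zero is the normalized null-space eigenvector, sketched here with numpy on a hypothetical two-state example:

import numpy as np

M = np.array([[-0.3,  0.1],
              [ 0.3, -0.1]])               # in/out rates between two states
w, v = np.linalg.eig(M)
p = np.real(v[:, np.argmin(np.abs(w))])    # eigenvector for the ~zero eigenvalue
p /= p.sum()                               # normalize to a probability vector
print(p)                                   # -> [0.25 0.75]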
gpl-3.0
camallen/aggregation
experimental/condor/animal_EM.py
2
7334
#!/usr/bin/env python __author__ = 'greghines' import numpy as np import os import pymongo import sys import cPickle as pickle import bisect import csv import matplotlib.pyplot as plt import random import math import urllib import matplotlib.cbook as cbook def index(a, x): 'Locate the leftmost value exactly equal to x' i = bisect.bisect_left(a, x) if i != len(a) and a[i] == x: return i raise ValueError if os.path.exists("/home/ggdhines"): sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg") sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier") else: sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg") sys.path.append("/home/greg/github/reduction/experimental/classifier") #from divisiveDBSCAN import DivisiveDBSCAN from divisiveDBSCAN_multi import DivisiveDBSCAN from divisiveKmeans import DivisiveKmeans from iterativeEM import IterativeEM if os.path.exists("/home/ggdhines"): base_directory = "/home/ggdhines" else: base_directory = "/home/greg" client = pymongo.MongoClient() db = client['condor_2014-11-23'] classification_collection = db["condor_classifications"] subject_collection = db["condor_subjects"] big_userList = [] big_subjectList = [] animal_count = 0 f = open(base_directory+"/Databases/condor_ibcc.csv","wb") f.write("a,b,c\n") alreadyDone = [] animals_in_image = {} animal_index = -1 global_user_list = [] animal_to_image = [] zooniverse_list = [] condor_votes = {} animal_votes = {} #subject_vote = {} results = [] to_sample_from = list(subject_collection.find({"state":"complete"})) to_sample_from2 = list(subject_collection.find({"classification_count":1,"state":"active"})) votes = [] sample = random.sample(to_sample_from,100) #sample.extend(random.sample(to_sample_from2,1000)) # for subject_index,subject in enumerate(sample): # print "== " + str(subject_index) # zooniverse_id = subject["zooniverse_id"] # for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})): # if "user_name" in classification: # user = classification["user_name"] # else: # user = classification["user_ip"] # # try: # tt = index(big_userList,user) # except ValueError: # bisect.insort(big_userList,user) for subject_index,subject in enumerate(sample): print subject_index zooniverse_id = subject["zooniverse_id"] annotation_list = [] user_list = [] animal_list = [] #local_users = [] for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})): try: mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",]) markings = classification["annotations"][mark_index].values()[0] if "user_name" in classification: user = classification["user_name"] else: user = classification["user_ip"] found_condor = False for animal in markings.values(): scale = 1.875 x = scale*float(animal["x"]) y = scale*float(animal["y"]) animal_type = animal["animal"] if not(animal_type in ["carcassOrScale","carcass"]): annotation_list.append((x,y)) #print annotation_list user_list.append(user) animal_list.append(animal_type) if not(user in global_user_list): global_user_list.append(user) #local_users.append(user) if animal_type == "condor": found_condor = True except (ValueError,KeyError): pass #if there were any markings on the image, use divisive kmeans to cluster the points so that each #cluster represents an image if annotation_list != []: user_identified,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True) #fix 
split clusters if necessary if user_identified != []: user_identified,clusters = DivisiveKmeans(3).__fix__(user_identified,clusters,annotation_list,user_list,200) for center,c in zip(user_identified,clusters): animal_index += 1 #animal_votes.append([]) animal_to_image.append(zooniverse_id) if not(zooniverse_id in animals_in_image): animals_in_image[zooniverse_id] = [animal_index] else: animals_in_image[zooniverse_id].append(animal_index) results.append((zooniverse_id,center)) for pt in c: pt_index = annotation_list.index(pt) user_index = global_user_list.index(user_list[pt_index]) animal_type = animal_list[annotation_list.index(pt)] if animal_type == "condor": votes.append((user_index,animal_index,1)) if not(animal_index in animal_votes): animal_votes[animal_index] = [1] else: animal_votes[animal_index].append(1) else: votes.append((user_index,animal_index,0)) if not(animal_index in animal_votes): animal_votes[animal_index] = [0] else: animal_votes[animal_index].append(0) print "=====---" #print votes classify = IterativeEM() classify.__classify__(votes) most_likely = classify.__getMostLikely__() estimates = classify.__getEstimates__() X = [] Y = [] X2 = [] Y2 = [] #for subject_index,zooniverse_id in enumerate(big_subjectList): for ii in range(animal_index): x = np.mean(animal_votes[ii]) y = estimates[ii][1] X.append(x) Y.append(y) if math.fabs(x-y) > 0.3: zooniverse_id,(centerX,centerY) = results[ii] print x,y subject = subject_collection.find_one({"zooniverse_id":zooniverse_id}) url = subject["location"]["standard"] slash_index = url.rfind("/") object_id = url[slash_index+1:] if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)): urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id) image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id) image = plt.imread(image_file) fig, ax = plt.subplots() im = ax.imshow(image) plt.plot([centerX,],[centerY,],'o') plt.show() # #if ((x < 0.5) and (y > 0.5)) or ((x > 0.5) and (y < 0.5)): # subject = subject_collection.find_one({"zooniverse_id":zooniverse_id}) # print x,y # print subject["location"]["standard"] # #print most_likely[subject_index],estimates[subject_index],np.mean(subject_vote[zooniverse_id]) #else: # print estimates[subject_index],0 plt.plot(X,Y,'.',color="blue") plt.plot(X2,Y2,'.',color="red") plt.xlim((-0.05,1.05)) plt.ylim((-0.05,1.05)) plt.show()
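The X values plotted above are simply the mean vote per clustered animal, which the EM estimates are compared against. A minimal sketch of that baseline aggregation over hypothetical (user_index, animal_index, is_condor) vote triples like the ones built above:

import numpy as np
from collections import defaultdict

votes = [(0, 0, 1), (1, 0, 1), (2, 0, 0),
         (0, 1, 0), (1, 1, 0)]

by_animal = defaultdict(list)
for user_idx, animal_idx, vote in votes:
    by_animal[animal_idx].append(vote)

mean_vote = {a: np.mean(v) for a, v in by_animal.items()}
print(mean_vote)   # {0: 0.666..., 1: 0.0}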
apache-2.0
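The condor script above reduces each clustered marking to a (user_index, animal_index, 0/1) vote triple, runs IterativeEM over those triples, and then compares the EM estimate for each animal against the raw mean of its votes. Below is a minimal sketch of that raw majority-vote baseline; the function name and the example triples are illustrative and not part of the original script.

from collections import defaultdict

def mean_vote_per_animal(votes):
    """Average the 0/1 condor votes for each animal index.

    `votes` is a list of (user_index, animal_index, vote) triples,
    the same layout the script above feeds into IterativeEM.
    """
    tallies = defaultdict(list)
    for _user_index, animal_index, vote in votes:
        tallies[animal_index].append(vote)
    return {animal: sum(v) / float(len(v)) for animal, v in tallies.items()}

# Example: both users call animal 0 a condor, they disagree on animal 1.
example_votes = [(0, 0, 1), (1, 0, 1), (0, 1, 0), (1, 1, 1)]
print(mean_vote_per_animal(example_votes))  # {0: 1.0, 1: 0.5}

Animals where this mean and the EM estimate disagree by more than 0.3 are exactly the cases the script flags and displays for visual inspection.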
sjperkins/tensorflow
tensorflow/python/estimator/canned/dnn_linear_combined_test.py
5
26973
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dnn_linear_combined.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile import numpy as np import six from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.estimator.canned import dnn_linear_combined from tensorflow.python.estimator.canned import dnn_testing_utils from tensorflow.python.estimator.canned import linear_testing_utils from tensorflow.python.estimator.canned import prediction_keys from tensorflow.python.estimator.export import export from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.estimator.inputs import pandas_io from tensorflow.python.feature_column import feature_column from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import gradient_descent from tensorflow.python.training import input as input_lib from tensorflow.python.training import optimizer as optimizer_lib try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn) def _dnn_only_model_fn(self, features, labels, mode, head, hidden_units, feature_columns, optimizer='Adagrad', activation_fn=nn.relu, dropout=None, input_layer_partitioner=None, config=None): return dnn_linear_combined._dnn_linear_combined_model_fn( features=features, labels=labels, mode=mode, head=head, linear_feature_columns=[], dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, dnn_activation_fn=activation_fn, dnn_dropout=dropout, input_layer_partitioner=input_layer_partitioner, config=config) # A function to mimic linear-regressor init reuse same tests. 
def _linear_regressor_fn(feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Ftrl', config=None, partitioner=None): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=partitioner, config=config) class LinearOnlyRegressorPartitionerTest( linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorEvaluationTest( linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorPredictTest( linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPredictTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorIntegrationTest( linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorTrainingTest( linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorTrainingTest.__init__( self, _linear_regressor_fn) def _linear_classifier_fn(feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Ftrl', config=None, partitioner=None): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=partitioner, config=config) class LinearOnlyClassifierTrainingTest( linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierTrainingTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierClassesEvaluationTest( linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierPredictTest( linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierPredictTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierIntegrationTest( 
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class DNNLinearCombinedRegressorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, label_dimension, batch_size): linear_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] dnn_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, label_dimension=label_dimension, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predictions = np.array([ x[prediction_keys.PredictionKeys.PREDICTIONS] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, label_dimension), predictions.shape) # EXPORT feature_spec = feature_column.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) def test_pandas_input_fn(self): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return label_dimension = 1 batch_size = 10 data = np.linspace(0., 2., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(data) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * 
label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), 'y': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) # A function to mimic dnn-classifier init reuse same tests. def _dnn_classifier_fn(hidden_units, feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyClassifierEvaluateTest( dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__( self, _dnn_classifier_fn) class DNNOnlyClassifierPredictTest( dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierPredictTest.__init__( self, _dnn_classifier_fn) class DNNOnlyClassifierTrainTest( dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierTrainTest.__init__( self, _dnn_classifier_fn) # A function to mimic dnn-regressor init reuse same tests. 
def _dnn_regressor_fn(hidden_units, feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyRegressorEvaluateTest( dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__( self, _dnn_regressor_fn) class DNNOnlyRegressorPredictTest( dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorPredictTest.__init__( self, _dnn_regressor_fn) class DNNOnlyRegressorTrainTest( dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorTrainTest.__init__( self, _dnn_regressor_fn) class DNNLinearCombinedClassifierIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _as_label(self, data_in_float): return np.rint(data_in_float).astype(np.int64) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, n_classes, batch_size): linear_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] dnn_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, n_classes=n_classes, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predicted_proba = np.array([ x[prediction_keys.PredictionKeys.PROBABILITIES] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, n_classes), predicted_proba.shape) # EXPORT feature_spec = feature_column.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" n_classes = 3 input_dimension = 2 batch_size = 10 data = np.linspace( 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32) x_data = data.reshape(batch_size, input_dimension) y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1))) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False) predict_input_fn = 
numpy_io.numpy_input_fn( x={'x': x_data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) def test_pandas_input_fn(self): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return input_dimension = 1 n_classes = 2 batch_size = 10 data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(self._as_label(data)) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" input_dimension = 2 n_classes = 3 batch_size = 10 data = np.linspace(0., n_classes-1., batch_size * input_dimension, dtype=np.float32) data = data.reshape(batch_size, input_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=datum)), 'y': feature_pb2.Feature(int64_list=feature_pb2.Int64List( value=self._as_label(datum[:1]))), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([1], dtypes.int64), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) class DNNLinearCombinedTests(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _mock_optimizer(self, real_optimizer, var_name_prefix): """Verifies global_step is None and var_names start with given prefix.""" def _minimize(loss, global_step=None, var_list=None): self.assertIsNone(global_step) trainable_vars = var_list or ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) var_names = [var.name for var in trainable_vars] self.assertTrue( all([name.startswith(var_name_prefix) for name in var_names])) # var is used to check this op called by training. 
var = variables_lib.Variable(0., name=(var_name_prefix + '_called')) with ops.control_dependencies([var.assign(100.)]): return real_optimizer.minimize(loss, global_step, var_list) optimizer_mock = test.mock.NonCallableMagicMock( spec=optimizer_lib.Optimizer, wraps=real_optimizer) optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize) return optimizer_mock def test_train_op_calls_both_dnn_and_linear(self): opt = gradient_descent.GradientDescentOptimizer(1.) x_column = feature_column.numeric_column('x') input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[0.], [1.]])}, y=np.array([[0.], [1.]]), batch_size=1, shuffle=False) est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[x_column], # verifies linear_optimizer is used only for linear part. linear_optimizer=self._mock_optimizer(opt, 'linear'), dnn_hidden_units=(2, 2), dnn_feature_columns=[x_column], # verifies dnn_optimizer is used only for linear part. dnn_optimizer=self._mock_optimizer(opt, 'dnn'), model_dir=self._model_dir) est.train(input_fn, steps=1) # verifies train_op fires linear minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'binary_logistic_head/linear_called')) # verifies train_op fires dnn minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'binary_logistic_head/dnn_called')) def test_dnn_and_linear_logits_are_added(self): with ops.Graph().as_default(): variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights') variables_lib.Variable([2.0], name='linear/linear_model/bias_weights') variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel') variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias') variables_lib.Variable([[5.0]], name='dnn/logits/kernel') variables_lib.Variable([6.0], name='dnn/logits/bias') variables_lib.Variable(1, name='global_step', dtype=dtypes.int64) linear_testing_utils.save_variables_to_ckpt(self._model_dir) x_column = feature_column.numeric_column('x') est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[x_column], dnn_hidden_units=[1], dnn_feature_columns=[x_column], model_dir=self._model_dir) input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[10.]])}, batch_size=1, shuffle=False) # linear logits = 10*1 + 2 = 12 # dnn logits = (10*3 + 4)*5 + 6 = 176 # logits = dnn + linear = 176 + 12 = 188 self.assertAllClose( { prediction_keys.PredictionKeys.PREDICTIONS: [188.], }, next(est.predict(input_fn=input_fn))) if __name__ == '__main__': test.main()
apache-2.0
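The test module above drives DNNLinearCombinedClassifier and DNNLinearCombinedRegressor through the canned train/evaluate/predict/export flow using internal test utilities. The following is a minimal usage sketch of the same wide-and-deep estimator through the public TF 1.x API; it assumes tf.estimator.DNNLinearCombinedClassifier and tf.estimator.inputs.numpy_input_fn are available in the targeted release, and the toy feature data is purely illustrative.

import numpy as np
import tensorflow as tf  # assumes the 1.x release this test file targets

# One numeric feature shared by the linear ("wide") and DNN ("deep") parts.
x_column = tf.feature_column.numeric_column('x')

estimator = tf.estimator.DNNLinearCombinedClassifier(
    linear_feature_columns=[x_column],
    dnn_feature_columns=[x_column],
    dnn_hidden_units=[2, 2],
    n_classes=2)

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[0.], [1.]], dtype=np.float32)},
    y=np.array([[0], [1]]),
    batch_size=1, num_epochs=None, shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[0.], [1.]], dtype=np.float32)},
    y=np.array([[0], [1]]),
    batch_size=1, shuffle=False)

estimator.train(train_input_fn, steps=10)
print(estimator.evaluate(eval_input_fn))

As in test_train_op_calls_both_dnn_and_linear above, separate optimizers for the two parts can be supplied through linear_optimizer and dnn_optimizer.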
cpcloud/ibis
ibis/pandas/execution/tests/test_join.py
1
13150
import pandas as pd import pandas.util.testing as tm import pytest from pytest import param import ibis import ibis.common.exceptions as com pytestmark = pytest.mark.pandas join_type = pytest.mark.parametrize( 'how', [ 'inner', 'left', 'right', 'outer', param( 'semi', marks=pytest.mark.xfail( raises=NotImplementedError, reason='Semi join not implemented' ), ), param( 'anti', marks=pytest.mark.xfail( raises=NotImplementedError, reason='Anti join not implemented' ), ), ], ) @join_type def test_join(how, left, right, df1, df2): expr = left.join(right, left.key == right.key, how=how)[ left, right.other_value, right.key3 ] result = expr.execute() expected = pd.merge(df1, df2, how=how, on='key') tm.assert_frame_equal(result[expected.columns], expected) def test_cross_join(left, right, df1, df2): expr = left.cross_join(right)[left, right.other_value, right.key3] result = expr.execute() expected = pd.merge( df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy' ).rename(columns=dict(key_x='key')) del expected['dummy'], expected['key_y'] tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_project_left_table(how, left, right, df1, df2): expr = left.join(right, left.key == right.key, how=how)[left, right.key3] result = expr.execute() expected = pd.merge(df1, df2, how=how, on='key')[ list(left.columns) + ['key3'] ] tm.assert_frame_equal(result[expected.columns], expected) def test_cross_join_project_left_table(left, right, df1, df2): expr = left.cross_join(right)[left, right.key3] result = expr.execute() expected = pd.merge( df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy' ).rename(columns=dict(key_x='key'))[list(left.columns) + ['key3']] tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_multiple_predicates(how, left, right, df1, df2): expr = left.join( right, [left.key == right.key, left.key2 == right.key3], how=how )[left, right.key3, right.other_value] result = expr.execute() expected = pd.merge( df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3'] ).reset_index(drop=True) tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_multiple_predicates_written_as_one( how, left, right, df1, df2 ): predicate = (left.key == right.key) & (left.key2 == right.key3) expr = left.join(right, predicate, how=how)[ left, right.key3, right.other_value ] result = expr.execute() expected = pd.merge( df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3'] ).reset_index(drop=True) tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_invalid_predicates(how, left, right): predicate = (left.key == right.key) & (left.key2 <= right.key3) expr = left.join(right, predicate, how=how) with pytest.raises(TypeError): expr.execute() predicate = left.key >= right.key expr = left.join(right, predicate, how=how) with pytest.raises(TypeError): expr.execute() @join_type @pytest.mark.xfail(reason='Hard to detect this case') def test_join_with_duplicate_non_key_columns(how, left, right, df1, df2): left = left.mutate(x=left.value * 2) right = right.mutate(x=right.other_value * 3) expr = left.join(right, left.key == right.key, how=how) # This is undefined behavior because `x` is duplicated. 
This is difficult # to detect with pytest.raises(ValueError): expr.execute() @join_type def test_join_with_duplicate_non_key_columns_not_selected( how, left, right, df1, df2 ): left = left.mutate(x=left.value * 2) right = right.mutate(x=right.other_value * 3) right = right[['key', 'other_value']] expr = left.join(right, left.key == right.key, how=how)[ left, right.other_value ] result = expr.execute() expected = pd.merge( df1.assign(x=df1.value * 2), df2[['key', 'other_value']], how=how, on='key', ) tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_post_expression_selection(how, left, right, df1, df2): join = left.join(right, left.key == right.key, how=how) expr = join[left.key, left.value, right.other_value] result = expr.execute() expected = pd.merge(df1, df2, on='key', how=how)[ ['key', 'value', 'other_value'] ] tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_post_expression_filter(how, left): lhs = left[['key', 'key2']] rhs = left[['key2', 'value']] joined = lhs.join(rhs, 'key2', how=how) projected = joined[lhs, rhs.value] expr = projected[projected.value == 4] result = expr.execute() df1 = lhs.execute() df2 = rhs.execute() expected = pd.merge(df1, df2, on='key2', how=how) expected = expected.loc[expected.value == 4].reset_index(drop=True) tm.assert_frame_equal(result, expected) @join_type def test_multi_join_with_post_expression_filter(how, left, df1): lhs = left[['key', 'key2']] rhs = left[['key2', 'value']] rhs2 = left[['key2', 'value']].relabel(dict(value='value2')) joined = lhs.join(rhs, 'key2', how=how) projected = joined[lhs, rhs.value] filtered = projected[projected.value == 4] joined2 = filtered.join(rhs2, 'key2') projected2 = joined2[filtered.key, rhs2.value2] expr = projected2[projected2.value2 == 3] result = expr.execute() df1 = lhs.execute() df2 = rhs.execute() df3 = rhs2.execute() expected = pd.merge(df1, df2, on='key2', how=how) expected = expected.loc[expected.value == 4].reset_index(drop=True) expected = pd.merge(expected, df3, on='key2')[['key', 'value2']] expected = expected.loc[expected.value2 == 3].reset_index(drop=True) tm.assert_frame_equal(result, expected) @join_type def test_join_with_non_trivial_key(how, left, right, df1, df2): # also test that the order of operands in the predicate doesn't matter join = left.join(right, right.key.length() == left.key.length(), how=how) expr = join[left.key, left.value, right.other_value] result = expr.execute() expected = ( pd.merge( df1.assign(key_len=df1.key.str.len()), df2.assign(key_len=df2.key.str.len()), on='key_len', how=how, ) .drop(['key_len', 'key_y', 'key2', 'key3'], axis=1) .rename(columns={'key_x': 'key'}) ) tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_non_trivial_key_project_table(how, left, right, df1, df2): # also test that the order of operands in the predicate doesn't matter join = left.join(right, right.key.length() == left.key.length(), how=how) expr = join[left, right.other_value] expr = expr[expr.key.length() == 1] result = expr.execute() expected = ( pd.merge( df1.assign(key_len=df1.key.str.len()), df2.assign(key_len=df2.key.str.len()), on='key_len', how=how, ) .drop(['key_len', 'key_y', 'key2', 'key3'], axis=1) .rename(columns={'key_x': 'key'}) ) expected = expected.loc[expected.key.str.len() == 1] tm.assert_frame_equal(result[expected.columns], expected) @join_type def test_join_with_project_right_duplicate_column(client, how, left, df1, df3): # also test that the order of operands 
in the predicate doesn't matter right = client.table('df3') join = left.join(right, ['key'], how=how) expr = join[left.key, right.key2, right.other_value] result = expr.execute() expected = ( pd.merge(df1, df3, on='key', how=how) .drop(['key2_x', 'key3', 'value'], axis=1) .rename(columns={'key2_y': 'key2'}) ) tm.assert_frame_equal(result[expected.columns], expected) def test_join_with_window_function( players_base, players_df, batting, batting_df ): players = players_base # this should be semi_join tbl = batting.left_join(players, ['playerID']) t = tbl[batting.G, batting.playerID, batting.teamID] expr = t.groupby(t.teamID).mutate( team_avg=lambda d: d.G.mean(), demeaned_by_player=lambda d: d.G - d.G.mean(), ) result = expr.execute() expected = pd.merge( batting_df, players_df[['playerID']], on='playerID', how='left' )[['G', 'playerID', 'teamID']] team_avg = expected.groupby('teamID').G.transform('mean') expected = expected.assign( team_avg=team_avg, demeaned_by_player=lambda df: df.G - team_avg ) tm.assert_frame_equal(result[expected.columns], expected) merge_asof_minversion = pytest.mark.skipif( pd.__version__ < '0.19.2', reason="at least pandas-0.19.2 required for merge_asof", ) @merge_asof_minversion def test_asof_join(time_left, time_right, time_df1, time_df2): expr = time_left.asof_join(time_right, 'time')[ time_left, time_right.other_value ] result = expr.execute() expected = pd.merge_asof(time_df1, time_df2, on='time') tm.assert_frame_equal(result[expected.columns], expected) @merge_asof_minversion def test_asof_join_predicate(time_left, time_right, time_df1, time_df2): expr = time_left.asof_join(time_right, time_left.time == time_right.time)[ time_left, time_right.other_value ] result = expr.execute() expected = pd.merge_asof(time_df1, time_df2, on='time') tm.assert_frame_equal(result[expected.columns], expected) @merge_asof_minversion def test_keyed_asof_join( time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2 ): expr = time_keyed_left.asof_join(time_keyed_right, 'time', by='key')[ time_keyed_left, time_keyed_right.other_value ] result = expr.execute() expected = pd.merge_asof( time_keyed_df1, time_keyed_df2, on='time', by='key' ) tm.assert_frame_equal(result[expected.columns], expected) @merge_asof_minversion def test_keyed_asof_join_with_tolerance( time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2 ): expr = time_keyed_left.asof_join( time_keyed_right, 'time', by='key', tolerance=2 * ibis.interval(days=1) )[time_keyed_left, time_keyed_right.other_value] result = expr.execute() expected = pd.merge_asof( time_keyed_df1, time_keyed_df2, on='time', by='key', tolerance=pd.Timedelta('2D'), ) tm.assert_frame_equal(result[expected.columns], expected) @pytest.mark.parametrize( "how", [ "left", pytest.param( "right", marks=pytest.mark.xfail( raises=AttributeError, reason="right_join is not an ibis API" ), ), "inner", "outer", ], ) @pytest.mark.parametrize( "func", [ pytest.param(lambda join: join["a0", "a1"], id="tuple"), pytest.param(lambda join: join[["a0", "a1"]], id="list"), pytest.param(lambda join: join.select(["a0", "a1"]), id="select"), ], ) @pytest.mark.xfail( raises=(com.IbisError, AttributeError), reason="Select from unambiguous joins not implemented", ) def test_select_on_unambiguous_join(how, func): df_t = pd.DataFrame(dict(a0=[1, 2, 3], b1=list("aab"))) df_s = pd.DataFrame(dict(a1=[2, 3, 4], b2=list("abc"))) con = ibis.pandas.connect({"t": df_t, "s": df_s}) t = con.table("t") s = con.table("s") method = getattr(t, "{}_join".format(how)) join = 
method(s, t.b1 == s.b2) expected = pd.merge(df_t, df_s, left_on=["b1"], right_on=["b2"], how=how)[ ["a0", "a1"] ] assert not expected.empty expr = func(join) result = expr.execute() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "func", [ pytest.param(lambda join: join["a0", "a1"], id="tuple"), pytest.param(lambda join: join[["a0", "a1"]], id="list"), pytest.param(lambda join: join.select(["a0", "a1"]), id="select"), ], ) @pytest.mark.xfail( raises=(com.IbisError, AttributeError), reason="Select from unambiguous joins not implemented", ) @merge_asof_minversion def test_select_on_unambiguous_asof_join(func): df_t = pd.DataFrame( dict(a0=[1, 2, 3], b1=pd.date_range("20180101", periods=3)) ) df_s = pd.DataFrame( dict(a1=[2, 3, 4], b2=pd.date_range("20171230", periods=3)) ) con = ibis.pandas.connect({"t": df_t, "s": df_s}) t = con.table("t") s = con.table("s") join = t.asof_join(s, t.b1 == s.b2) expected = pd.merge_asof(df_t, df_s, left_on=["b1"], right_on=["b2"])[ ["a0", "a1"] ] assert not expected.empty expr = func(join) result = expr.execute() tm.assert_frame_equal(result, expected)
apache-2.0
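The join tests above build ibis expressions over the pandas backend and compare them against pandas.merge. A minimal sketch of that pattern outside the test harness follows, assuming the same ibis.pandas.connect entry point the tests use; the frames and column names are illustrative.

import pandas as pd
import ibis

df_left = pd.DataFrame({'key': ['a', 'b', 'c'], 'value': [1, 2, 3]})
df_right = pd.DataFrame({'key': ['a', 'b', 'd'], 'other_value': [10.0, 20.0, 40.0]})

con = ibis.pandas.connect({'left': df_left, 'right': df_right})
left = con.table('left')
right = con.table('right')

# Inner join on `key`, projecting the left table plus one right-hand column,
# mirroring the expressions exercised by test_join above.
expr = left.join(right, left.key == right.key, how='inner')[left, right.other_value]
result = expr.execute()

expected = pd.merge(df_left, df_right, how='inner', on='key')
print(result)
print(expected)
# The tests above compare such pairs with tm.assert_frame_equal(result[expected.columns], expected).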
harisbal/pandas
pandas/core/tools/datetimes.py
4
30680
from functools import partial from datetime import datetime, time from collections import MutableMapping import numpy as np from pandas._libs import tslib, tslibs from pandas._libs.tslibs.strptime import array_strptime from pandas._libs.tslibs import parsing, conversion, Timestamp from pandas._libs.tslibs.parsing import ( # noqa parse_time_string, DateParseError, _format_is_iso, _guess_datetime_format) from pandas.core.dtypes.common import ( ensure_object, is_datetime64_ns_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_integer_dtype, is_integer, is_float, is_list_like, is_scalar, is_numeric_dtype, is_object_dtype) from pandas.core.dtypes.generic import ( ABCIndexClass, ABCSeries, ABCDataFrame) from pandas.core.dtypes.missing import notna from pandas.core import algorithms from pandas.compat import zip def _guess_datetime_format_for_array(arr, **kwargs): # Try to guess the format based on the first non-NaN element non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs) def _maybe_cache(arg, format, cache, convert_listlike): """ Create a cache of unique dates from an array of dates Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series format : string Strftime format to parse time cache : boolean True attempts to create a cache of converted values convert_listlike : function Conversion function to apply on dates Returns ------- cache_array : Series Cache of converted, unique dates. Can be empty """ from pandas import Series cache_array = Series() if cache: # Perform a quicker unique check from pandas import Index if not Index(arg).is_unique: unique_dates = algorithms.unique(arg) cache_dates = convert_listlike(unique_dates, True, format) cache_array = Series(cache_dates, index=unique_dates) return cache_array def _convert_and_box_cache(arg, cache_array, box, errors, name=None): """ Convert array of dates with a cache and box the result Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series cache_array : Series Cache of converted, unique dates box : boolean True boxes result as an Index-like, False returns an ndarray errors : string 'ignore' plus box=True will convert result to Index name : string, default None Name for a DatetimeIndex Returns ------- result : datetime of converted dates Returns: - Index-like if box=True - ndarray if box=False """ from pandas import Series, DatetimeIndex, Index result = Series(arg).map(cache_array) if box: if errors == 'ignore': return Index(result, name=name) else: return DatetimeIndex(result, name=name) return result.values def _return_parsed_timezone_results(result, timezones, box, tz, name): """ Return results from array_strptime if a %z or %Z directive was passed. 
Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False """ if tz is not None: raise ValueError("Cannot pass a tz argument when " "parsing strings with timezone " "information.") tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]) if box: from pandas import Index return Index(tz_results, name=name) return tz_results def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, unit=None, errors=None, infer_datetime_format=None, dayfirst=None, yearfirst=None, exact=None): """ Helper function for to_datetime. Performs the conversions of 1D listlike of dates Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be parced box : boolean True boxes result as an Index-like, False returns an ndarray name : object None or string for the Index name tz : object None or 'utc' unit : string None or string of the frequency of the passed data errors : string error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore' infer_datetime_format : boolean inferring format behavior from to_datetime dayfirst : boolean dayfirst parsing behavior from to_datetime yearfirst : boolean yearfirst parsing behavior from to_datetime exact : boolean exact format matching behavior from to_datetime Returns ------- ndarray of parsed dates Returns: - Index-like if box=True - ndarray of Timestamps if box=False """ from pandas import DatetimeIndex if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') # these are shortcutable if is_datetime64tz_dtype(arg): if not isinstance(arg, DatetimeIndex): return DatetimeIndex(arg, tz=tz, name=name) if tz == 'utc': arg = arg.tz_convert(None).tz_localize(tz) return arg elif is_datetime64_ns_dtype(arg): if box and not isinstance(arg, DatetimeIndex): try: return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass return arg elif unit is not None: if format is not None: raise ValueError("cannot specify both format and unit") arg = getattr(arg, 'values', arg) result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors) if box: if errors == 'ignore': from pandas import Index return Index(result, name=name) return DatetimeIndex(result, tz=tz, name=name) return result elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') arg = ensure_object(arg) require_iso8601 = False if infer_datetime_format and format is None: format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst) if format is not None: # There is a special fast-path for iso8601 formatted # datetime strings, so in those cases don't use the inferred # format because this path makes process slower in this # special case format_is_iso8601 = _format_is_iso(format) if format_is_iso8601: require_iso8601 = not infer_datetime_format format = None try: result = None if format is not None: # shortcut formatting here if format == '%Y%m%d': try: result = _attempt_YYYYMMDD(arg, errors=errors) except (ValueError, TypeError, tslibs.OutOfBoundsDatetime): raise ValueError("cannot convert the input to " "'%Y%m%d' date format") # fallback if result is None: try: result, timezones = array_strptime( arg, format, 
exact=exact, errors=errors) if '%Z' in format or '%z' in format: return _return_parsed_timezone_results( result, timezones, box, tz, name) except tslibs.OutOfBoundsDatetime: if errors == 'raise': raise result = arg except ValueError: # if format was inferred, try falling back # to array_to_datetime - terminate here # for specified formats if not infer_datetime_format: if errors == 'raise': raise result = arg if result is None and (format is None or infer_datetime_format): result, tz_parsed = tslib.array_to_datetime( arg, errors=errors, utc=tz == 'utc', dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) if tz_parsed is not None: if box: # We can take a shortcut since the datetime64 numpy array # is in UTC return DatetimeIndex._simple_new(result, name=name, tz=tz_parsed) else: # Convert the datetime64 numpy array to an numpy array # of datetime objects result = [Timestamp(ts, tz=tz_parsed).to_pydatetime() for ts in result] return np.array(result, dtype=object) if box: # Ensure we return an Index in all cases where box=True if is_datetime64_dtype(result): return DatetimeIndex(result, tz=tz, name=name) elif is_object_dtype(result): # e.g. an Index of datetime objects from pandas import Index return Index(result, name=name) return result except ValueError as e: try: values, tz = conversion.datetime_to_datetime64(arg) return DatetimeIndex._simple_new(values, name=name, tz=tz) except (ValueError, TypeError): raise e def _adjust_to_origin(arg, origin, unit): """ Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s) """ if origin == 'julian': original = arg j0 = Timestamp(0).to_julian_date() if unit != 'D': raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 except TypeError: raise ValueError("incompatible 'arg' type for given " "'origin'='julian'") # premptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 j_min = Timestamp.min.to_julian_date() - j0 if np.any(arg > j_max) or np.any(arg < j_min): raise tslibs.OutOfBoundsDatetime( "{original} is Out of Bounds for " "origin='julian'".format(original=original)) else: # arg must be numeric if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg))): raise ValueError( "'{arg}' is not compatible with origin='{origin}'; " "it must be numeric with a unit specified ".format( arg=arg, origin=origin)) # we are going to offset back to unix / epoch time try: offset = Timestamp(origin) except tslibs.OutOfBoundsDatetime: raise tslibs.OutOfBoundsDatetime( "origin {origin} is Out of Bounds".format(origin=origin)) except ValueError: raise ValueError("origin {origin} cannot be converted " "to a Timestamp".format(origin=origin)) if offset.tz is not None: raise ValueError( "origin offset {} must be tz-naive".format(offset)) offset -= Timestamp(0) # convert the offset to the unit of the arg # this should be lossless in terms of precision offset = offset // tslibs.Timedelta(1, unit=unit) # scalars & ndarray-like can handle the addition if is_list_like(arg) and not isinstance( arg, (ABCSeries, ABCIndexClass, np.ndarray)): arg = np.asarray(arg) arg = arg + offset return arg def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, 
unit=None, infer_datetime_format=False, origin='unix', cache=False): """ Convert argument to datetime. Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series .. versionadded:: 0.18.1 or DataFrame/dict-like errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input dayfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug, based on dateutil behavior). yearfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). box : boolean, default True - If True returns a DatetimeIndex or Index-like object - If False returns ndarray of values. format : string, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. exact : boolean, True by default - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. unit : string, default 'ns' unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. infer_datetime_format : boolean, default False If True and no `format` is given, attempt to infer the format of the datetime strings, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. origin : scalar, default is 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. - If 'unix' (or POSIX) time; origin is set to 1970-01-01. - If 'julian', unit must be 'D', and origin is set to beginning of Julian Calendar. Julian day number 0 is assigned to the day starting at noon on January 1, 4713 BC. - If Timestamp convertible, origin is set to Timestamp identified by origin. .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 Returns ------- ret : datetime if parsing succeeded. Return type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. when any element of input is before Timestamp.min or after Timestamp.max) return will have datetime.datetime type (or corresponding array/Series). Examples -------- Assembling a datetime from multiple columns of a DataFrame. 
The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or plurals of the same >>> df = pd.DataFrame({'year': [2015, 2016], 'month': [2, 3], 'day': [4, 5]}) >>> pd.to_datetime(df) 0 2015-02-04 1 2016-03-05 dtype: datetime64[ns] If a date does not meet the `timestamp limitations <http://pandas.pydata.org/pandas-docs/stable/timeseries.html #timeseries-timestamp-limits>`_, passing errors='ignore' will return the original input instead of raising any exception. Passing errors='coerce' will force an out-of-bounds date to NaT, in addition to forcing non-dates (or non-parseable dates) to NaT. >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often-times speedup a parsing if its not an ISO8601 format exactly, but in a regular format. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s,infer_datetime_format=True) 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s,infer_datetime_format=False) 1 loop, best of 3: 471 ms per loop Using a unix epoch time >>> pd.to_datetime(1490195805, unit='s') Timestamp('2017-03-22 15:16:45') >>> pd.to_datetime(1490195805433502912, unit='ns') Timestamp('2017-03-22 15:16:45.433502912') .. warning:: For float arg, precision rounding might happen. To prevent unexpected behavior use a fixed-width exact type. Using a non-unix epoch origin >>> pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) 0 1960-01-02 1 1960-01-03 2 1960-01-04 See also -------- pandas.DataFrame.astype : Cast argument to a specified dtype. pandas.to_timedelta : Convert argument to timedelta. 
""" if arg is None: return None if origin != 'unix': arg = _adjust_to_origin(arg, origin, unit) tz = 'utc' if utc else None convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit, dayfirst=dayfirst, yearfirst=yearfirst, errors=errors, exact=exact, infer_datetime_format=infer_datetime_format) if isinstance(arg, Timestamp): result = arg elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = arg.map(cache_array) else: from pandas import Series values = convert_listlike(arg._values, True, format) result = Series(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, MutableMapping)): result = _assemble_from_unit_mappings(arg, errors=errors) elif isinstance(arg, ABCIndexClass): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name) else: convert_listlike = partial(convert_listlike, name=arg.name) result = convert_listlike(arg, box, format) elif is_list_like(arg): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors) else: result = convert_listlike(arg, box, format) else: result = convert_listlike(np.array([arg]), box, format)[0] return result # mappings for assembling units _unit_map = {'year': 'year', 'years': 'year', 'month': 'month', 'months': 'month', 'day': 'day', 'days': 'day', 'hour': 'h', 'hours': 'h', 'minute': 'm', 'minutes': 'm', 'second': 's', 'seconds': 's', 'ms': 'ms', 'millisecond': 'ms', 'milliseconds': 'ms', 'us': 'us', 'microsecond': 'us', 'microseconds': 'us', 'ns': 'ns', 'nanosecond': 'ns', 'nanoseconds': 'ns' } def _assemble_from_unit_mappings(arg, errors): """ assemble the unit specified fields from the arg (DataFrame) Return a Series for actual parsing Parameters ---------- arg : DataFrame errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input Returns ------- Series """ from pandas import to_timedelta, to_numeric, DataFrame arg = DataFrame(arg) if not arg.columns.is_unique: raise ValueError("cannot assemble with duplicate keys") # replace passed unit with _unit_map def f(value): if value in _unit_map: return _unit_map[value] # m is case significant if value.lower() in _unit_map: return _unit_map[value.lower()] return value unit = {k: f(k) for k in arg.keys()} unit_rev = {v: k for k, v in unit.items()} # we require at least Ymd required = ['year', 'month', 'day'] req = sorted(list(set(required) - set(unit_rev.keys()))) if len(req): raise ValueError("to assemble mappings requires at least that " "[year, month, day] be specified: [{required}] " "is missing".format(required=','.join(req))) # keys we don't recognize excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values()))) if len(excess): raise ValueError("extra keys have been passed " "to the datetime assemblage: " "[{excess}]".format(excess=','.join(excess))) def coerce(values): # we allow coercion to if errors allows values = to_numeric(values, errors=errors) # prevent overflow in case of int8 or int16 if is_integer_dtype(values): values = values.astype('int64', copy=False) return values values = (coerce(arg[unit_rev['year']]) * 10000 + coerce(arg[unit_rev['month']]) * 100 + coerce(arg[unit_rev['day']])) try: values = 
to_datetime(values, format='%Y%m%d', errors=errors) except (TypeError, ValueError) as e: raise ValueError("cannot assemble the " "datetimes: {error}".format(error=e)) for u in ['h', 'm', 's', 'ms', 'us', 'ns']: value = unit_rev.get(u) if value is not None and value in arg: try: values += to_timedelta(coerce(arg[value]), unit=u, errors=errors) except (TypeError, ValueError) as e: raise ValueError("cannot assemble the datetimes [{value}]: " "{error}".format(value=value, error=e)) return values def _attempt_YYYYMMDD(arg, errors): """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, arg is a passed in as an object dtype, but could really be ints/strings with nan-like/or floats (e.g. with nan) Parameters ---------- arg : passed value errors : 'raise','ignore','coerce' """ def calc(carg): # calculate the actual result carg = carg.astype(object) parsed = parsing.try_parse_year_month_day(carg / 10000, carg / 100 % 100, carg % 100) return tslib.array_to_datetime(parsed, errors=errors)[0] def calc_with_mask(carg, mask): result = np.empty(carg.shape, dtype='M8[ns]') iresult = result.view('i8') iresult[~mask] = tslibs.iNaT masked_result = calc(carg[mask].astype(np.float64).astype(np.int64)) result[mask] = masked_result.astype('M8[ns]') return result # try intlike / strings that are ints try: return calc(arg.astype(np.int64)) except ValueError: pass # a float with actual np.nan try: carg = arg.astype(np.float64) return calc_with_mask(carg, notna(carg)) except ValueError: pass # string with NaN-like try: mask = ~algorithms.isin(arg, list(tslib.nat_strings)) return calc_with_mask(arg, mask) except ValueError: pass return None # Fixed time formats for time parsing _time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"] def _guess_time_format_for_array(arr): # Try to guess the format based on the first non-NaN element non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): element = arr[non_nan_elements[0]] for time_format in _time_formats: try: datetime.strptime(element, time_format) return time_format except ValueError: pass return None def to_time(arg, format=None, infer_time_format=False, errors='raise'): """ Parse time strings to time objects using fixed strptime formats ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p") Use infer_time_format if all the strings are in the same format to speed up conversion. Parameters ---------- arg : string in time format, datetime.time, list, tuple, 1-d array, Series format : str, default None Format used to convert arg into a time object. If None, fixed formats are used. infer_time_format: bool, default False Infer the time format based on the first non-NaN element. If all strings are in the same format, this will speed up conversion. 
errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as None - If 'ignore', then invalid parsing will return the input Returns ------- datetime.time """ from pandas.core.series import Series def _convert_listlike(arg, format): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') arg = ensure_object(arg) if infer_time_format and format is None: format = _guess_time_format_for_array(arg) times = [] if format is not None: for element in arg: try: times.append(datetime.strptime(element, format).time()) except (ValueError, TypeError): if errors == 'raise': msg = ("Cannot convert {element} to a time with given " "format {format}").format(element=element, format=format) raise ValueError(msg) elif errors == 'ignore': return arg else: times.append(None) else: formats = _time_formats[:] format_found = False for element in arg: time_object = None for time_format in formats: try: time_object = datetime.strptime(element, time_format).time() if not format_found: # Put the found format in front fmt = formats.pop(formats.index(time_format)) formats.insert(0, fmt) format_found = True break except (ValueError, TypeError): continue if time_object is not None: times.append(time_object) elif errors == 'raise': raise ValueError("Cannot convert arg {arg} to " "a time".format(arg=arg)) elif errors == 'ignore': return arg else: times.append(None) return times if arg is None: return arg elif isinstance(arg, time): return arg elif isinstance(arg, Series): values = _convert_listlike(arg._values, format) return Series(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, format) elif is_list_like(arg): return _convert_listlike(arg, format) return _convert_listlike(np.array([arg]), format)[0]
bsd-3-clause
junbochen/pylearn2
pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py
44
3208
from __future__ import print_function
from optparse import OptionParser
import warnings
try:
    from sklearn.metrics import classification_report
except ImportError:
    classification_report = None
    warnings.warn("couldn't find sklearn.metrics.classification_report")

try:
    from sklearn.metrics import confusion_matrix
except ImportError:
    confusion_matrix = None
    warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")

from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np


def test(model, X, y):
    print("Evaluating svm")
    y_pred = model.predict(X)
    #try:
    if True:
        acc = (y == y_pred).mean()
        print("Accuracy ", acc)
    """except:
        print("something went wrong")
        print('y:')
        print(y)
        print('y_pred:')
        print(y_pred)
        print('extra info')
        print(type(y))
        print(type(y_pred))
        print(y.dtype)
        print(y_pred.dtype)
        print(y.shape)
        print(y_pred.shape)
        raise
    """
#


def get_test_labels(cifar10, cifar100, stl10):
    assert cifar10 + cifar100 + stl10 == 1

    if stl10:
        print('loading entire stl-10 test set just to get the labels')
        stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
        return stl10.y
    if cifar10:
        print('loading entire cifar10 test set just to get the labels')
        cifar10 = CIFAR10(which_set='test')
        return np.asarray(cifar10.y)
    if cifar100:
        print('loading entire cifar100 test set just to get the fine labels')
        cifar100 = CIFAR100(which_set='test')
        return np.asarray(cifar100.y_fine)
    assert False


def main(model_path, test_path, dataset, **kwargs):
    model = serial.load(model_path)

    cifar100 = dataset == 'cifar100'
    cifar10 = dataset == 'cifar10'
    stl10 = dataset == 'stl10'
    assert cifar10 + cifar100 + stl10 == 1

    y = get_test_labels(cifar10, cifar100, stl10)
    X = get_features(test_path, False, False)
    if stl10:
        num_examples = 8000
    if cifar10 or cifar100:
        num_examples = 10000
    if not X.shape[0] == num_examples:
        raise AssertionError('Expected %d examples but got %d' %
                             (num_examples, X.shape[0]))
    assert y.shape[0] == num_examples

    test(model, X, y)


if __name__ == '__main__':
    """
    Useful for quick tests.
    Usage: python train_bilinear.py
    """

    parser = OptionParser()
    parser.add_option("-m", "--model",
                      action="store", type="string", dest="model_path")
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-o", action="store", dest="output", default=None,
                      help="path to write the report to")
    parser.add_option('--dataset', type='string', dest='dataset',
                      action='store', default=None)

    #(options, args) = parser.parse_args()
    #assert options.output

    main(model_path='final_model.pkl',
         test_path='test_features.npy',
         dataset='cifar100',
         )
bsd-3-clause
joergkappes/opengm
src/interfaces/python/examples/python_visitor_gui.py
14
1377
""" Usage: python_visitor_gui.py This script shows how one can implement visitors in pure python and inject them into OpenGM solver. ( not all OpenGM solvers support this kind of code injection ) """ import opengm import numpy import matplotlib from matplotlib import pyplot as plt shape=[100,100] numLabels=10 unaries=numpy.random.rand(shape[0], shape[1],numLabels) potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4) gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts) inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5)) class PyCallback(object): def __init__(self,shape,numLabels): self.shape=shape self.numLabels=numLabels self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3)) matplotlib.interactive(True) def begin(self,inference): print "begin of inference" def end(self,inference): print "end of inference" def visit(self,inference): gm=inference.gm() labelVector=inference.arg() print "energy ",gm.evaluate(labelVector) labelVector=labelVector.reshape(self.shape) plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest") plt.draw() callback=PyCallback(shape,numLabels) visitor=inf.pythonVisitor(callback,visitNth=1) inf.infer(visitor) argmin=inf.arg()
mit
bikong2/scikit-learn
examples/ensemble/plot_ensemble_oob.py
259
3265
""" ============================= OOB Errors for Random Forests ============================= The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where each new tree is fit from a bootstrap sample of the training observations :math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for each :math:`z_i` calculated using predictions from the trees that do not contain :math:`z_i` in their respective bootstrap sample. This allows the ``RandomForestClassifier`` to be fit and validated whilst being trained [1]. The example below demonstrates how the OOB error can be measured at the addition of each new tree during training. The resulting plot allows a practitioner to approximate a suitable value of ``n_estimators`` at which the error stabilizes. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", p592-593, Springer, 2009. """ import matplotlib.pyplot as plt from collections import OrderedDict from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # Author: Kian Ho <hui.kian.ho@gmail.com> # Gilles Louppe <g.louppe@gmail.com> # Andreas Mueller <amueller@ais.uni-bonn.de> # # License: BSD 3 Clause print(__doc__) RANDOM_STATE = 123 # Generate a binary classification dataset. X, y = make_classification(n_samples=500, n_features=25, n_clusters_per_class=1, n_informative=15, random_state=RANDOM_STATE) # NOTE: Setting the `warm_start` construction parameter to `True` disables # support for paralellised ensembles but is necessary for tracking the OOB # error trajectory during training. ensemble_clfs = [ ("RandomForestClassifier, max_features='sqrt'", RandomForestClassifier(warm_start=True, oob_score=True, max_features="sqrt", random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features='log2'", RandomForestClassifier(warm_start=True, max_features='log2', oob_score=True, random_state=RANDOM_STATE)), ("RandomForestClassifier, max_features=None", RandomForestClassifier(warm_start=True, max_features=None, oob_score=True, random_state=RANDOM_STATE)) ] # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs. error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) # Range of `n_estimators` values to explore. min_estimators = 15 max_estimators = 175 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X, y) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) # Generate the "OOB error rate" vs. "n_estimators" plot. for label, clf_err in error_rate.items(): xs, ys = zip(*clf_err) plt.plot(xs, ys, label=label) plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.legend(loc="upper right") plt.show()
bsd-3-clause
dingocuster/scikit-learn
sklearn/metrics/regression.py
175
16953
"""Metrics to assess performance on regression task Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck <L.J.Buitinck@uva.nl> # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Manoj Kumar <manojkumarsivaraj334@gmail.com> # Michael Eickenberg <michael.eickenberg@gmail.com> # Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu> # License: BSD 3 clause from __future__ import division import numpy as np from ..utils.validation import check_array, check_consistent_length from ..utils.validation import column_or_1d import warnings __ALL__ = [ "mean_absolute_error", "mean_squared_error", "median_absolute_error", "r2_score", "explained_variance_score" ] def _check_reg_targets(y_true, y_pred, multioutput): """Check that y_true and y_pred belong to the same regression task Parameters ---------- y_true : array-like, y_pred : array-like, multioutput : array-like or string in ['raw_values', uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). Returns ------- type_true : one of {'continuous', continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target' y_true : array-like of shape = (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape = (n_outputs) or string in ['raw_values', uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword. """ check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False) y_pred = check_array(y_pred, ensure_2d=False) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError("y_true and y_pred have different number of output " "({0}!={1})".format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] multioutput_options = (None, 'raw_values', 'uniform_average', 'variance_weighted') if multioutput not in multioutput_options: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError("Custom weights are useful only in " "multi-output cases.") elif n_outputs != len(multioutput): raise ValueError(("There must be equally many custom weights " "(%d) as outputs (%d).") % (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' return y_type, y_true, y_pred, multioutput def mean_absolute_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Mean absolute error regression loss Read more in the :ref:`User Guide <mean_absolute_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. 
multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. MAE output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> mean_absolute_error(y_true, y_pred) 0.75 >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') array([ 0.5, 1. ]) >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) ... # doctest: +ELLIPSIS 0.849... """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0) if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput) def mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Mean squared error regression loss Read more in the :ref:`User Guide <mean_squared_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_squared_error(y_true, y_pred) 0.375 >>> y_true = [[0.5, 1],[-1, 1],[7, -6]] >>> y_pred = [[0, 2],[-1, 2],[8, -5]] >>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS 0.708... >>> mean_squared_error(y_true, y_pred, multioutput='raw_values') ... # doctest: +ELLIPSIS array([ 0.416..., 1. ]) >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7]) ... # doctest: +ELLIPSIS 0.824... 
""" y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight) if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput) def median_absolute_error(y_true, y_pred): """Median absolute error regression loss Read more in the :ref:`User Guide <median_absolute_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) Estimated target values. Returns ------- loss : float A positive floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import median_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> median_absolute_error(y_true, y_pred) 0.5 """ y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, 'uniform_average') if y_type == 'continuous-multioutput': raise ValueError("Multioutput not supported in median_absolute_error") return np.median(np.abs(y_pred - y_true)) def explained_variance_score(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Explained variance regression score function Best possible score is 1.0, lower values are worse. Read more in the :ref:`User Guide <explained_variance_score>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average', \ 'variance_weighted'] or array-like of shape (n_outputs) Defines aggregating of multiple output scores. Array-like value defines weights used to average scores. 'raw_values' : Returns a full set of scores in case of multioutput input. 'uniform_average' : Scores of all outputs are averaged with uniform weight. 'variance_weighted' : Scores of all outputs are averaged, weighted by the variances of each individual output. Returns ------- score : float or ndarray of floats The explained variance or ndarray if 'multioutput' is 'raw_values'. Notes ----- This is not a symmetric function. Examples -------- >>> from sklearn.metrics import explained_variance_score >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS 0.957... >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average') ... # doctest: +ELLIPSIS 0.983... """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0) numerator = np.average((y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0) y_true_avg = np.average(y_true, weights=sample_weight, axis=0) denominator = np.average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0) nonzero_numerator = numerator != 0 nonzero_denominator = denominator != 0 valid_score = nonzero_numerator & nonzero_denominator output_scores = np.ones(y_true.shape[1]) output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score]) output_scores[nonzero_numerator & ~nonzero_denominator] = 0. 
if multioutput == 'raw_values': # return scores individually return output_scores elif multioutput == 'uniform_average': # passing to np.average() None as weights results is uniform mean avg_weights = None elif multioutput == 'variance_weighted': avg_weights = denominator else: avg_weights = multioutput return np.average(output_scores, weights=avg_weights) def r2_score(y_true, y_pred, sample_weight=None, multioutput=None): """R^2 (coefficient of determination) regression score function. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Read more in the :ref:`User Guide <r2_score>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average', 'variance_weighted'] or None or array-like of shape (n_outputs) Defines aggregating of multiple output scores. Array-like value defines weights used to average scores. Default value correponds to 'variance_weighted', but will be changed to 'uniform_average' in next versions. 'raw_values' : Returns a full set of scores in case of multioutput input. 'uniform_average' : Scores of all outputs are averaged with uniform weight. 'variance_weighted' : Scores of all outputs are averaged, weighted by the variances of each individual output. Returns ------- z : float or ndarray of floats The R^2 score or ndarray of scores if 'multioutput' is 'raw_values'. Notes ----- This is not a symmetric function. Unlike most other scores, R^2 score may be negative (it need not actually be the square of a quantity R). References ---------- .. [1] `Wikipedia entry on the Coefficient of determination <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_ Examples -------- >>> from sklearn.metrics import r2_score >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS 0.948... >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS 0.938... """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) weight = sample_weight[:, np.newaxis] else: weight = 1. numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64) denominator = (weight * (y_true - np.average( y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0, dtype=np.float64) nonzero_denominator = denominator != 0 nonzero_numerator = numerator != 0 valid_score = nonzero_denominator & nonzero_numerator output_scores = np.ones([y_true.shape[1]]) output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score]) # arbitrary set to zero to avoid -inf scores, having a constant # y_true is not interesting for scoring a regression anyway output_scores[nonzero_numerator & ~nonzero_denominator] = 0. 
if multioutput is None and y_true.shape[1] != 1: # @FIXME change in 0.18 warnings.warn("Default 'multioutput' behavior now corresponds to " "'variance_weighted' value, it will be changed " "to 'uniform_average' in 0.18.", DeprecationWarning) multioutput = 'variance_weighted' if multioutput == 'raw_values': # return scores individually return output_scores elif multioutput == 'uniform_average': # passing None as weights results is uniform mean avg_weights = None elif multioutput == 'variance_weighted': avg_weights = denominator # avoid fail on constant y or one-element arrays if not np.any(nonzero_denominator): if not np.any(nonzero_numerator): return 1.0 else: return 0.0 else: avg_weights = multioutput return np.average(output_scores, weights=avg_weights)
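To make the R^2 definition implemented above concrete, here is a small standalone check, not part of the module, that reproduces the docstring value 0.948... by hand from R^2 = 1 - sum((y - yhat)^2) / sum((y - mean(y))^2):

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

ss_res = ((y_true - y_pred) ** 2).sum()          # residual sum of squares
ss_tot = ((y_true - y_true.mean()) ** 2).sum()   # total sum of squares
manual = 1 - ss_res / ss_tot

print(manual)                    # ~0.9486
print(r2_score(y_true, y_pred))  # same value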
bsd-3-clause
trankmichael/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
wanggang3333/scikit-learn
examples/model_selection/plot_validation_curve.py
229
1823
""" ========================== Plotting Validation Curves ========================== In this plot you can see the training scores and validation scores of an SVM for different values of the kernel parameter gamma. For very low values of gamma, you can see that both the training score and the validation score are low. This is called underfitting. Medium values of gamma will result in high values for both scores, i.e. the classifier is performing fairly well. If gamma is too high, the classifier will overfit, which means that the training score is good but the validation score is poor. """ print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.svm import SVC from sklearn.learning_curve import validation_curve digits = load_digits() X, y = digits.data, digits.target param_range = np.logspace(-6, -1, 5) train_scores, test_scores = validation_curve( SVC(), X, y, param_name="gamma", param_range=param_range, cv=10, scoring="accuracy", n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.title("Validation Curve with SVM") plt.xlabel("$\gamma$") plt.ylabel("Score") plt.ylim(0.0, 1.1) plt.semilogx(param_range, train_scores_mean, label="Training score", color="r") plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="r") plt.semilogx(param_range, test_scores_mean, label="Cross-validation score", color="g") plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="g") plt.legend(loc="best") plt.show()
bsd-3-clause
yunfeilu/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
alexei-matveev/ase-local
doc/exercises/siesta1/answer1.py
3
1197
# -*- coding: utf-8 -*-
# creates: ener.png distance.png angle.png
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt

e_s = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
E = [-463.2160, -462.9633, -462.4891, -462.0551, -461.5426, -461.1714]
d = [1.1131, 1.1046, 1.0960, 1.0901, 1.0857, 1.0810]
alpha = [100.832453365, 99.568214268, 99.1486065462, 98.873671379,
         98.1726341945, 98.0535643778]

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.29, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, E, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'Energy [eV]')
plt.title('Total Energy vs Eshift')
plt.savefig('ener.png')

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.24, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, d, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'O-H distance [Å]')
limits = plt.axis('tight')
plt.title('O-H distance vs Eshift')
plt.savefig('distance.png')

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.26, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, alpha, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'H2O angle')
limits = plt.axis('tight')
plt.title('H2O angle vs Eshift')
plt.savefig('angle.png')
gpl-2.0
saimn/astropy
astropy/visualization/wcsaxes/frame.py
8
10649
# Licensed under a 3-clause BSD style license - see LICENSE.rst import abc from collections import OrderedDict import numpy as np from matplotlib import rcParams from matplotlib.lines import Line2D, Path from matplotlib.patches import PathPatch __all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame'] class Spine: """ A single side of an axes. This does not need to be a straight line, but represents a 'side' when determining which part of the frame to put labels and ticks on. """ def __init__(self, parent_axes, transform): self.parent_axes = parent_axes self.transform = transform self.data = None self.pixel = None self.world = None @property def data(self): return self._data @data.setter def data(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = value self._pixel = self.parent_axes.transData.transform(self._data) with np.errstate(invalid='ignore'): self._world = self.transform.transform(self._data) self._update_normal() @property def pixel(self): return self._pixel @pixel.setter def pixel(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.parent_axes.transData.inverted().transform(self._data) self._pixel = value self._world = self.transform.transform(self._data) self._update_normal() @property def world(self): return self._world @world.setter def world(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.transform.transform(value) self._pixel = self.parent_axes.transData.transform(self._data) self._world = value self._update_normal() def _update_normal(self): # Find angle normal to border and inwards, in display coordinate dx = self.pixel[1:, 0] - self.pixel[:-1, 0] dy = self.pixel[1:, 1] - self.pixel[:-1, 1] self.normal_angle = np.degrees(np.arctan2(dx, -dy)) def _halfway_x_y_angle(self): """ Return the x, y, normal_angle values halfway along the spine """ x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1] # Get distance along the path d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]) xcen = np.interp(d[-1] / 2., d, x_disp) ycen = np.interp(d[-1] / 2., d, y_disp) # Find segment along which the mid-point lies imin = np.searchsorted(d, d[-1] / 2.) - 1 # Find normal of the axis label facing outwards on that segment normal_angle = self.normal_angle[imin] + 180. return xcen, ycen, normal_angle class SpineXAligned(Spine): """ A single side of an axes, aligned with the X data axis. This does not need to be a straight line, but represents a 'side' when determining which part of the frame to put labels and ticks on. 
""" @property def data(self): return self._data @data.setter def data(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = value self._pixel = self.parent_axes.transData.transform(self._data) with np.errstate(invalid='ignore'): self._world = self.transform.transform(self._data[:,0:1]) self._update_normal() @property def pixel(self): return self._pixel @pixel.setter def pixel(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.parent_axes.transData.inverted().transform(self._data) self._pixel = value self._world = self.transform.transform(self._data[:,0:1]) self._update_normal() class BaseFrame(OrderedDict, metaclass=abc.ABCMeta): """ Base class for frames, which are collections of :class:`~astropy.visualization.wcsaxes.frame.Spine` instances. """ spine_class = Spine def __init__(self, parent_axes, transform, path=None): super().__init__() self.parent_axes = parent_axes self._transform = transform self._linewidth = rcParams['axes.linewidth'] self._color = rcParams['axes.edgecolor'] self._path = path for axis in self.spine_names: self[axis] = self.spine_class(parent_axes, transform) @property def origin(self): ymin, ymax = self.parent_axes.get_ylim() return 'lower' if ymin < ymax else 'upper' @property def transform(self): return self._transform @transform.setter def transform(self, value): self._transform = value for axis in self: self[axis].transform = value def _update_patch_path(self): self.update_spines() x, y = [], [] for axis in self: x.append(self[axis].data[:, 0]) y.append(self[axis].data[:, 1]) vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose() if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices @property def patch(self): self._update_patch_path() return PathPatch(self._path, transform=self.parent_axes.transData, facecolor=rcParams['axes.facecolor'], edgecolor='white') def draw(self, renderer): for axis in self: x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer) def sample(self, n_samples): self.update_spines() spines = OrderedDict() for axis in self: data = self[axis].data p = np.linspace(0., 1., data.shape[0]) p_new = np.linspace(0., 1., n_samples) spines[axis] = self.spine_class(self.parent_axes, self.transform) spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose() return spines def set_color(self, color): """ Sets the color of the frame. Parameters ---------- color : str The color of the frame. """ self._color = color def get_color(self): return self._color def set_linewidth(self, linewidth): """ Sets the linewidth of the frame. Parameters ---------- linewidth : float The linewidth of the frame in points. """ self._linewidth = linewidth def get_linewidth(self): return self._linewidth @abc.abstractmethod def update_spines(self): raise NotImplementedError("") class RectangularFrame1D(BaseFrame): """ A classic rectangular frame. 
""" spine_names = 'bt' spine_class = SpineXAligned def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() self['b'].data = np.array(([xmin, ymin], [xmax, ymin])) self['t'].data = np.array(([xmax, ymax], [xmin, ymax])) def _update_patch_path(self): self.update_spines() xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() x = [xmin, xmax, xmax, xmin, xmin] y = [ymin, ymin, ymax, ymax, ymin] vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose() if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices def draw(self, renderer): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() x = [xmin, xmax, xmax, xmin, xmin] y = [ymin, ymin, ymax, ymax, ymin] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000, transform=self.parent_axes.transData) line.draw(renderer) class RectangularFrame(BaseFrame): """ A classic rectangular frame. """ spine_names = 'brtl' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() self['b'].data = np.array(([xmin, ymin], [xmax, ymin])) self['r'].data = np.array(([xmax, ymin], [xmax, ymax])) self['t'].data = np.array(([xmax, ymax], [xmin, ymax])) self['l'].data = np.array(([xmin, ymax], [xmin, ymin])) class EllipticalFrame(BaseFrame): """ An elliptical frame. """ spine_names = 'chv' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() xmid = 0.5 * (xmax + xmin) ymid = 0.5 * (ymax + ymin) dx = xmid - xmin dy = ymid - ymin theta = np.linspace(0., 2 * np.pi, 1000) self['c'].data = np.array([xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]).transpose() self['h'].data = np.array([np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]).transpose() self['v'].data = np.array([np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]).transpose() def _update_patch_path(self): """Override path patch to include only the outer ellipse, not the major and minor axes in the middle.""" self.update_spines() vertices = self['c'].data if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices def draw(self, renderer): """Override to draw only the outer ellipse, not the major and minor axes in the middle. FIXME: we may want to add a general method to give the user control over which spines are drawn.""" axis = 'c' x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer)
bsd-3-clause
HeraclesHX/scikit-learn
sklearn/cluster/tests/test_dbscan.py
114
11393
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from scipy.spatial import distance from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_not_in from sklearn.cluster.dbscan_ import DBSCAN from sklearn.cluster.dbscan_ import dbscan from sklearn.cluster.tests.common import generate_clustered_data from sklearn.metrics.pairwise import pairwise_distances n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): # Tests the DBSCAN algorithm with a similarity array. # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): # Tests the DBSCAN algorithm with a feature vector array. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_sparse(): core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8, min_samples=10) core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10) assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_no_core_samples(): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 for X_ in [X, sparse.csr_matrix(X)]: db = DBSCAN(min_samples=6).fit(X_) assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) assert_array_equal(db.labels_, -1) assert_equal(db.core_sample_indices_.shape, (0,)) def test_dbscan_callable(): # Tests the DBSCAN algorithm with a callable metric. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_balltree(): # Tests the DBSCAN algorithm with balltree for neighbor calculation. 
eps = 0.8 min_samples = 10 D = pairwise_distances(X) core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree') labels = db.fit(X).labels_ n_clusters_3 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_3, n_clusters) db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_4 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_4, n_clusters) db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_5 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_5, n_clusters) def test_input_validation(): # DBSCAN.fit should accept a list of lists. X = [[1., 2.], [3., 4.]] DBSCAN().fit(X) # must not raise exception def test_dbscan_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, dbscan, X, eps=-1.0) assert_raises(ValueError, dbscan, X, algorithm='blah') assert_raises(ValueError, dbscan, X, metric='blah') assert_raises(ValueError, dbscan, X, leaf_size=-1) assert_raises(ValueError, dbscan, X, p=-1) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__) def test_boundaries(): # ensure min_samples is inclusive of core point core, _ = dbscan([[0], [1]], eps=2, min_samples=2) assert_in(0, core) # ensure eps is inclusive of circumference core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) assert_in(0, core) core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2) assert_not_in(0, core) def test_weighted_dbscan(): # ensure sample_weight is validated assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2]) assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4]) # ensure sample_weight has an effect assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]) # points within eps of each other: assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]) # and effect of non-positive and non-integer sample_weight: assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]) # for non-negative sample_weight, cores should be identical to repetition rng = np.random.RandomState(42) sample_weight = rng.randint(0, 5, X.shape[0]) core1, label1 = dbscan(X, sample_weight=sample_weight) assert_equal(len(label1), len(X)) X_repeated = np.repeat(X, sample_weight, axis=0) core_repeated, label_repeated = dbscan(X_repeated) core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) 
core_repeated_mask[core_repeated] = True core_mask = np.zeros(X.shape[0], dtype=bool) core_mask[core1] = True assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) # sample_weight should work with precomputed distance matrix D = pairwise_distances(X) core3, label3 = dbscan(D, sample_weight=sample_weight, metric='precomputed') assert_array_equal(core1, core3) assert_array_equal(label1, label3) # sample_weight should work with estimator est = DBSCAN().fit(X, sample_weight=sample_weight) core4 = est.core_sample_indices_ label4 = est.labels_ assert_array_equal(core1, core4) assert_array_equal(label1, label4) est = DBSCAN() label5 = est.fit_predict(X, sample_weight=sample_weight) core5 = est.core_sample_indices_ assert_array_equal(core1, core5) assert_array_equal(label1, label5) assert_array_equal(label1, est.labels_) def test_dbscan_core_samples_toy(): X = [[0], [2], [3], [4], [6], [8], [10]] n_samples = len(X) for algorithm in ['brute', 'kd_tree', 'ball_tree']: # Degenerate case: every sample is a core sample, either with its own # cluster or including other close core samples. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) assert_array_equal(core_samples, np.arange(n_samples)) assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) # With eps=1 and min_samples=2 only the 3 samples from the denser area # are core samples. All other points are isolated and considered noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) assert_array_equal(core_samples, [1, 2, 3]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # Only the sample in the middle of the dense area is core. Its two # neighbors are edge samples. Remaining samples are noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) assert_array_equal(core_samples, [2]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # It's no longer possible to extract core samples with eps=1: # everything is noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) assert_array_equal(core_samples, []) assert_array_equal(labels, -np.ones(n_samples)) def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): # see https://github.com/scikit-learn/scikit-learn/issues/4641 for # more details X = np.ones((10, 2)) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) X = np.zeros((10, 2)) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1)
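The toy case exercised in test_dbscan_core_samples_toy above translates directly into a tiny usage sketch: with eps=1 and min_samples=2 only the tight group at indices 1-3 yields core samples and everything else is labelled noise (-1).

from sklearn.cluster import dbscan

X_toy = [[0], [2], [3], [4], [6], [8], [10]]
core_samples, labels = dbscan(X_toy, eps=1, min_samples=2)
print(core_samples)  # [1 2 3]
print(labels)        # [-1  0  0  0 -1 -1 -1]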
bsd-3-clause
zrhans/pythonanywhere
.virtualenvs/django19/lib/python3.4/site-packages/pandas/tseries/tests/test_frequencies.py
9
25284
from datetime import datetime, time, timedelta from pandas.compat import range import sys import os import nose import numpy as np from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range import pandas.tseries.frequencies as frequencies from pandas.tseries.tools import to_datetime import pandas.tseries.offsets as offsets from pandas.tseries.period import PeriodIndex import pandas.compat as compat from pandas.compat import is_platform_windows import pandas.util.testing as tm from pandas import Timedelta def test_to_offset_multiple(): freqstr = '2h30min' freqstr2 = '2h 30min' result = frequencies.to_offset(freqstr) assert(result == frequencies.to_offset(freqstr2)) expected = offsets.Minute(150) assert(result == expected) freqstr = '2h30min15s' result = frequencies.to_offset(freqstr) expected = offsets.Second(150 * 60 + 15) assert(result == expected) freqstr = '2h 60min' result = frequencies.to_offset(freqstr) expected = offsets.Hour(3) assert(result == expected) freqstr = '15l500u' result = frequencies.to_offset(freqstr) expected = offsets.Micro(15500) assert(result == expected) freqstr = '10s75L' result = frequencies.to_offset(freqstr) expected = offsets.Milli(10075) assert(result == expected) freqstr = '2800N' result = frequencies.to_offset(freqstr) expected = offsets.Nano(2800) assert(result == expected) # malformed try: frequencies.to_offset('2h20m') except ValueError: pass else: assert(False) def test_to_offset_negative(): freqstr = '-1S' result = frequencies.to_offset(freqstr) assert(result.n == -1) freqstr = '-5min10s' result = frequencies.to_offset(freqstr) assert(result.n == -310) def test_to_offset_leading_zero(): freqstr = '00H 00T 01S' result = frequencies.to_offset(freqstr) assert(result.n == 1) freqstr = '-00H 03T 14S' result = frequencies.to_offset(freqstr) assert(result.n == -194) def test_to_offset_pd_timedelta(): # Tests for #9064 td = Timedelta(days=1, seconds=1) result = frequencies.to_offset(td) expected = offsets.Second(86401) assert(expected==result) td = Timedelta(days=-1, seconds=1) result = frequencies.to_offset(td) expected = offsets.Second(-86399) assert(expected==result) td = Timedelta(hours=1, minutes=10) result = frequencies.to_offset(td) expected = offsets.Minute(70) assert(expected==result) td = Timedelta(hours=1, minutes=-10) result = frequencies.to_offset(td) expected = offsets.Minute(50) assert(expected==result) td = Timedelta(weeks=1) result = frequencies.to_offset(td) expected = offsets.Day(7) assert(expected==result) td1 = Timedelta(hours=1) result1 = frequencies.to_offset(td1) result2 = frequencies.to_offset('60min') assert(result1 == result2) td = Timedelta(microseconds=1) result = frequencies.to_offset(td) expected = offsets.Micro(1) assert(expected == result) td = Timedelta(microseconds=0) tm.assertRaises(ValueError, lambda: frequencies.to_offset(td)) def test_anchored_shortcuts(): result = frequencies.to_offset('W') expected = frequencies.to_offset('W-SUN') assert(result == expected) result1 = frequencies.to_offset('Q') result2 = frequencies.to_offset('Q-DEC') expected = offsets.QuarterEnd(startingMonth=12) assert(result1 == expected) assert(result2 == expected) result1 = frequencies.to_offset('Q-MAY') expected = offsets.QuarterEnd(startingMonth=5) assert(result1 == expected) def test_get_rule_month(): result = frequencies._get_rule_month('W') assert(result == 'DEC') result = frequencies._get_rule_month(offsets.Week()) assert(result == 'DEC') result = frequencies._get_rule_month('D') assert(result == 'DEC') result = 
frequencies._get_rule_month(offsets.Day()) assert(result == 'DEC') result = frequencies._get_rule_month('Q') assert(result == 'DEC') result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12)) print(result == 'DEC') result = frequencies._get_rule_month('Q-JAN') assert(result == 'JAN') result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1)) assert(result == 'JAN') result = frequencies._get_rule_month('A-DEC') assert(result == 'DEC') result = frequencies._get_rule_month(offsets.YearEnd()) assert(result == 'DEC') result = frequencies._get_rule_month('A-MAY') assert(result == 'MAY') result = frequencies._get_rule_month(offsets.YearEnd(month=5)) assert(result == 'MAY') class TestFrequencyCode(tm.TestCase): def test_freq_code(self): self.assertEqual(frequencies.get_freq('A'), 1000) self.assertEqual(frequencies.get_freq('3A'), 1000) self.assertEqual(frequencies.get_freq('-1A'), 1000) self.assertEqual(frequencies.get_freq('W'), 4000) self.assertEqual(frequencies.get_freq('W-MON'), 4001) self.assertEqual(frequencies.get_freq('W-FRI'), 4005) for freqstr, code in compat.iteritems(frequencies._period_code_map): result = frequencies.get_freq(freqstr) self.assertEqual(result, code) result = frequencies.get_freq_group(freqstr) self.assertEqual(result, code // 1000 * 1000) result = frequencies.get_freq_group(code) self.assertEqual(result, code // 1000 * 1000) def test_freq_group(self): self.assertEqual(frequencies.get_freq_group('A'), 1000) self.assertEqual(frequencies.get_freq_group('3A'), 1000) self.assertEqual(frequencies.get_freq_group('-1A'), 1000) self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000) self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000) self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000) self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000) self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000) self.assertEqual(frequencies.get_freq_group('W'), 4000) self.assertEqual(frequencies.get_freq_group('W-MON'), 4000) self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000) self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000) self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000) self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000) def test_get_to_timestamp_base(self): tsb = frequencies.get_to_timestamp_base self.assertEqual(tsb(frequencies.get_freq_code('D')[0]), frequencies.get_freq_code('D')[0]) self.assertEqual(tsb(frequencies.get_freq_code('W')[0]), frequencies.get_freq_code('D')[0]) self.assertEqual(tsb(frequencies.get_freq_code('M')[0]), frequencies.get_freq_code('D')[0]) self.assertEqual(tsb(frequencies.get_freq_code('S')[0]), frequencies.get_freq_code('S')[0]) self.assertEqual(tsb(frequencies.get_freq_code('T')[0]), frequencies.get_freq_code('S')[0]) self.assertEqual(tsb(frequencies.get_freq_code('H')[0]), frequencies.get_freq_code('S')[0]) def test_freq_to_reso(self): Reso = frequencies.Resolution self.assertEqual(Reso.get_str_from_freq('A'), 'year') self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter') self.assertEqual(Reso.get_str_from_freq('M'), 'month') self.assertEqual(Reso.get_str_from_freq('D'), 'day') self.assertEqual(Reso.get_str_from_freq('H'), 'hour') self.assertEqual(Reso.get_str_from_freq('T'), 'minute') self.assertEqual(Reso.get_str_from_freq('S'), 'second') self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond') self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond') 
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond') for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']: # check roundtrip result = Reso.get_freq(Reso.get_str_from_freq(freq)) self.assertEqual(freq, result) for freq in ['D', 'H', 'T', 'S', 'L', 'U']: result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq))) self.assertEqual(freq, result) def test_get_freq_code(self): # freqstr self.assertEqual(frequencies.get_freq_code('A'), (frequencies.get_freq('A'), 1)) self.assertEqual(frequencies.get_freq_code('3D'), (frequencies.get_freq('D'), 3)) self.assertEqual(frequencies.get_freq_code('-2M'), (frequencies.get_freq('M'), -2)) # tuple self.assertEqual(frequencies.get_freq_code(('D', 1)), (frequencies.get_freq('D'), 1)) self.assertEqual(frequencies.get_freq_code(('A', 3)), (frequencies.get_freq('A'), 3)) self.assertEqual(frequencies.get_freq_code(('M', -2)), (frequencies.get_freq('M'), -2)) # numeric tuple self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1)) # offsets self.assertEqual(frequencies.get_freq_code(offsets.Day()), (frequencies.get_freq('D'), 1)) self.assertEqual(frequencies.get_freq_code(offsets.Day(3)), (frequencies.get_freq('D'), 3)) self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)), (frequencies.get_freq('D'), -2)) self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()), (frequencies.get_freq('M'), 1)) self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)), (frequencies.get_freq('M'), 3)) self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)), (frequencies.get_freq('M'), -2)) self.assertEqual(frequencies.get_freq_code(offsets.Week()), (frequencies.get_freq('W'), 1)) self.assertEqual(frequencies.get_freq_code(offsets.Week(3)), (frequencies.get_freq('W'), 3)) self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)), (frequencies.get_freq('W'), -2)) # monday is weekday=0 self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)), (frequencies.get_freq('W-TUE'), 1)) self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)), (frequencies.get_freq('W-MON'), 3)) self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)), (frequencies.get_freq('W-FRI'), -2)) _dti = DatetimeIndex class TestFrequencyInference(tm.TestCase): def test_raise_if_period_index(self): index = PeriodIndex(start="1/1/1990", periods=20, freq="M") self.assertRaises(TypeError, frequencies.infer_freq, index) def test_raise_if_too_few(self): index = _dti(['12/31/1998', '1/3/1999']) self.assertRaises(ValueError, frequencies.infer_freq, index) def test_business_daily(self): index = _dti(['12/31/1998', '1/3/1999', '1/4/1999']) self.assertEqual(frequencies.infer_freq(index), 'B') def test_day(self): self._check_tick(timedelta(1), 'D') def test_day_corner(self): index = _dti(['1/1/2000', '1/2/2000', '1/3/2000']) self.assertEqual(frequencies.infer_freq(index), 'D') def test_non_datetimeindex(self): dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000']) self.assertEqual(frequencies.infer_freq(dates), 'D') def test_hour(self): self._check_tick(timedelta(hours=1), 'H') def test_minute(self): self._check_tick(timedelta(minutes=1), 'T') def test_second(self): self._check_tick(timedelta(seconds=1), 'S') def test_millisecond(self): self._check_tick(timedelta(microseconds=1000), 'L') def test_microsecond(self): self._check_tick(timedelta(microseconds=1), 'U') def test_nanosecond(self): self._check_tick(np.timedelta64(1, 'ns'), 'N') def _check_tick(self, base_delta, code): b = Timestamp(datetime.now()) for i in range(1, 
5): inc = base_delta * i index = _dti([b + inc * j for j in range(3)]) if i > 1: exp_freq = '%d%s' % (i, code) else: exp_freq = code self.assertEqual(frequencies.infer_freq(index), exp_freq) index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range(3)]) self.assertIsNone(frequencies.infer_freq(index)) index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta * 7]) self.assertIsNone(frequencies.infer_freq(index)) def test_weekly(self): days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] for day in days: self._check_generated_range('1/1/2000', 'W-%s' % day) def test_week_of_month(self): days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] for day in days: for i in range(1, 5): self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day)) def test_fifth_week_of_month(self): # Only supports freq up to WOM-4. See #9425 func = lambda: date_range('2014-01-01', freq='WOM-5MON') self.assertRaises(ValueError, func) def test_fifth_week_of_month_infer(self): # Only attempts to infer up to WOM-4. See #9425 index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"]) assert frequencies.infer_freq(index) is None def test_week_of_month_fake(self): #All of these dates are on same day of week and are 4 or 5 weeks apart index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"]) assert frequencies.infer_freq(index) != 'WOM-4TUE' def test_monthly(self): self._check_generated_range('1/1/2000', 'M') def test_monthly_ambiguous(self): rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000']) self.assertEqual(rng.inferred_freq, 'M') def test_business_monthly(self): self._check_generated_range('1/1/2000', 'BM') def test_business_start_monthly(self): self._check_generated_range('1/1/2000', 'BMS') def test_quarterly(self): for month in ['JAN', 'FEB', 'MAR']: self._check_generated_range('1/1/2000', 'Q-%s' % month) def test_annual(self): for month in MONTHS: self._check_generated_range('1/1/2000', 'A-%s' % month) def test_business_annual(self): for month in MONTHS: self._check_generated_range('1/1/2000', 'BA-%s' % month) def test_annual_ambiguous(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) self.assertEqual(rng.inferred_freq, 'A-JAN') def _check_generated_range(self, start, freq): freq = freq.upper() gen = date_range(start, periods=7, freq=freq) index = _dti(gen.values) if not freq.startswith('Q-'): self.assertEqual(frequencies.infer_freq(index), gen.freqstr) else: inf_freq = frequencies.infer_freq(index) self.assertTrue((inf_freq == 'Q-DEC' and gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')) or (inf_freq == 'Q-NOV' and gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')) or (inf_freq == 'Q-OCT' and gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN'))) gen = date_range(start, periods=5, freq=freq) index = _dti(gen.values) if not freq.startswith('Q-'): self.assertEqual(frequencies.infer_freq(index), gen.freqstr) else: inf_freq = frequencies.infer_freq(index) self.assertTrue((inf_freq == 'Q-DEC' and gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')) or (inf_freq == 'Q-NOV' and gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')) or (inf_freq == 'Q-OCT' and gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN'))) def test_infer_freq(self): rng = period_range('1959Q2', '2009Q3', freq='Q') rng = Index(rng.to_timestamp('D', how='e').asobject) self.assertEqual(rng.inferred_freq, 'Q-DEC') rng = period_range('1959Q2', '2009Q3', freq='Q-NOV') rng = Index(rng.to_timestamp('D', how='e').asobject) self.assertEqual(rng.inferred_freq, 'Q-NOV') rng = 
period_range('1959Q2', '2009Q3', freq='Q-OCT') rng = Index(rng.to_timestamp('D', how='e').asobject) self.assertEqual(rng.inferred_freq, 'Q-OCT') def test_infer_freq_tz(self): freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'], 'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'], 'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'], 'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'], 'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'], 'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00'] } # GH 7310 for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris', 'US/Pacific', 'US/Eastern']: for expected, dates in compat.iteritems(freqs): idx = DatetimeIndex(dates, tz=tz) self.assertEqual(idx.inferred_freq, expected) def test_infer_freq_tz_transition(self): # Tests for #8772 date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST ['2014-03-08', '2014-03-11'], #Spring DST ['2014-01-01', '2014-01-03']] #Regular Time freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N'] for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris', 'US/Pacific', 'US/Eastern']: for date_pair in date_pairs: for freq in freqs: idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz) self.assertEqual(idx.inferred_freq, freq) index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago") self.assertIsNone(index.inferred_freq) def test_infer_freq_businesshour(self): # GH 7905 idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00']) # hourly freq in a day must result in 'H' self.assertEqual(idx.inferred_freq, 'H') idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00', '2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00']) self.assertEqual(idx.inferred_freq, 'BH') idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00']) self.assertEqual(idx.inferred_freq, 'BH') idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00', '2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00']) self.assertEqual(idx.inferred_freq, 'BH') def test_not_monotonic(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) rng = rng[::-1] self.assertEqual(rng.inferred_freq, '-1A-JAN') def test_non_datetimeindex(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) vals = rng.to_pydatetime() result = frequencies.infer_freq(vals) self.assertEqual(result, rng.inferred_freq) def test_invalid_index_types(self): # test all index types for i in [ tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10) ]: self.assertRaises(TypeError, lambda : frequencies.infer_freq(i)) # GH 10822 # odd error message on conversions to datetime for unicode if not is_platform_windows(): for i in [ tm.makeStringIndex(10), 
tm.makeUnicodeIndex(10) ]: self.assertRaises(ValueError, lambda : frequencies.infer_freq(i)) def test_string_datetimelike_compat(self): # GH 6463 expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04']) result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04'])) self.assertEqual(result,expected) def test_series(self): # GH6407 # inferring series # invalid type of Series for s in [ Series(np.arange(10)), Series(np.arange(10.))]: self.assertRaises(TypeError, lambda : frequencies.infer_freq(s)) # a non-convertible string self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar']))) # cannot infer on PeriodIndex for freq in [None, 'L']: s = Series(period_range('2013',periods=10,freq=freq)) self.assertRaises(TypeError, lambda : frequencies.infer_freq(s)) for freq in ['Y']: with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s = Series(period_range('2013',periods=10,freq=freq)) self.assertRaises(TypeError, lambda : frequencies.infer_freq(s)) # DateTimeIndex for freq in ['M', 'L', 'S']: s = Series(date_range('20130101',periods=10,freq=freq)) inferred = frequencies.infer_freq(s) self.assertEqual(inferred,freq) s = Series(date_range('20130101','20130110')) inferred = frequencies.infer_freq(s) self.assertEqual(inferred,'D') def test_legacy_offset_warnings(self): for k, v in compat.iteritems(frequencies._rule_aliases): with tm.assert_produces_warning(FutureWarning): result = frequencies.get_offset(k) exp = frequencies.get_offset(v) self.assertEqual(result, exp) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): idx = date_range('2011-01-01', periods=5, freq=k) exp = date_range('2011-01-01', periods=5, freq=v) self.assert_index_equal(idx, exp) MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] def test_is_superperiod_subperiod(): assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd())) assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd())) assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute())) assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour())) assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli())) assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second())) assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro())) assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli())) assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano())) assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro())) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
apache-2.0
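The record above tests pandas' frequency codes and `infer_freq`. As a quick orientation, here is a minimal standalone sketch of the inference behaviour those tests rely on; it assumes only a pandas installation and uses the older frequency aliases ('D', 'M') that appear in the tests, so newer pandas releases may print different alias spellings.

import pandas as pd

# A regularly spaced daily index is inferred as daily.
idx = pd.date_range('2000-01-01', periods=5, freq='D')
print(pd.infer_freq(idx))   # 'D'

# Month-end anchored dates are inferred as a month-end frequency
# ('M' in the pandas versions these tests target).
idx = pd.DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31'])
print(pd.infer_freq(idx))

# An irregular gap makes the frequency non-inferable.
idx = pd.DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-05'])
print(pd.infer_freq(idx))   # None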
AlexanderFabisch/scikit-learn
sklearn/decomposition/tests/test_pca.py
21
11810
import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_no_warnings from sklearn import datasets from sklearn.decomposition import PCA from sklearn.decomposition import RandomizedPCA from sklearn.decomposition.pca import _assess_dimension_ from sklearn.decomposition.pca import _infer_dimension_ iris = datasets.load_iris() def test_pca(): # PCA on dense arrays pca = PCA(n_components=2) X = iris.data X_r = pca.fit(X).transform(X) np.testing.assert_equal(X_r.shape[1], 2) X_r2 = pca.fit_transform(X) assert_array_almost_equal(X_r, X_r2) pca = PCA() pca.fit(X) assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3) X_r = pca.transform(X) X_r2 = pca.fit_transform(X) assert_array_almost_equal(X_r, X_r2) # Test get_covariance and get_precision with n_components == n_features # with n_components < n_features and with n_components == 0 for n_components in [0, 2, X.shape[1]]: pca.n_components = n_components pca.fit(X) cov = pca.get_covariance() precision = pca.get_precision() assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12) def test_no_empty_slice_warning(): # test if we avoid numpy warnings for computing over empty arrays n_components = 10 n_features = n_components + 2 # anything > n_comps triggerred it in 0.16 X = np.random.uniform(-1, 1, size=(n_components, n_features)) pca = PCA(n_components=n_components) assert_no_warnings(pca.fit, X) def test_whitening(): # Check that PCA output has unit-variance rng = np.random.RandomState(0) n_samples = 100 n_features = 80 n_components = 30 rank = 50 # some low rank data with correlated features X = np.dot(rng.randn(n_samples, rank), np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features))) # the component-wise variance of the first 50 features is 3 times the # mean component-wise variance of the remaingin 30 features X[:, :50] *= 3 assert_equal(X.shape, (n_samples, n_features)) # the component-wise variance is thus highly varying: assert_almost_equal(X.std(axis=0).std(), 43.9, 1) for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA) for y in (True, False)]: # whiten the data while projecting to the lower dim subspace X_ = X.copy() # make sure we keep an original across iterations. pca = this_PCA(n_components=n_components, whiten=True, copy=copy) if hasattr(pca, 'random_state'): pca.random_state = rng # test fit_transform X_whitened = pca.fit_transform(X_.copy()) assert_equal(X_whitened.shape, (n_samples, n_components)) X_whitened2 = pca.transform(X_) assert_array_almost_equal(X_whitened, X_whitened2) assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components), decimal=4) assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components)) X_ = X.copy() pca = this_PCA(n_components=n_components, whiten=False, copy=copy).fit(X_) X_unwhitened = pca.transform(X_) assert_equal(X_unwhitened.shape, (n_samples, n_components)) # in that case the output components still have varying variances assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1) # we always center, so no test for non-centering. 
def test_explained_variance(): # Check that PCA output has unit-variance rng = np.random.RandomState(0) n_samples = 100 n_features = 80 X = rng.randn(n_samples, n_features) pca = PCA(n_components=2).fit(X) rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X) assert_array_almost_equal(pca.explained_variance_ratio_, rpca.explained_variance_ratio_, 1) # compare to empirical variances X_pca = pca.transform(X) assert_array_almost_equal(pca.explained_variance_, np.var(X_pca, axis=0)) X_rpca = rpca.transform(X) assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0), decimal=1) # Same with correlated data X = datasets.make_classification(n_samples, n_features, n_informative=n_features-2, random_state=rng)[0] pca = PCA(n_components=2).fit(X) rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X) assert_array_almost_equal(pca.explained_variance_ratio_, rpca.explained_variance_ratio_, 5) def test_pca_check_projection(): # Test that the projection of data is correct rng = np.random.RandomState(0) n, p = 100, 3 X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5]) Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) Yt = PCA(n_components=2).fit(X).transform(Xt) Yt /= np.sqrt((Yt ** 2).sum()) assert_almost_equal(np.abs(Yt[0][0]), 1., 1) def test_pca_inverse(): # Test that the projection of data can be inverted rng = np.random.RandomState(0) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= .00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed # signal (since the data is almost of rank n_components) pca = PCA(n_components=2).fit(X) Y = pca.transform(X) Y_inverse = pca.inverse_transform(Y) assert_almost_equal(X, Y_inverse, decimal=3) # same as above with whitening (approximate reconstruction) pca = PCA(n_components=2, whiten=True) pca.fit(X) Y = pca.transform(X) Y_inverse = pca.inverse_transform(Y) assert_almost_equal(X, Y_inverse, decimal=3) def test_pca_validation(): X = [[0, 1], [1, 0]] for n_components in [-1, 3]: assert_raises(ValueError, PCA(n_components).fit, X) def test_randomized_pca_check_projection(): # Test that the projection by RandomizedPCA on dense data is correct rng = np.random.RandomState(0) n, p = 100, 3 X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5]) Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5]) Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt) Yt /= np.sqrt((Yt ** 2).sum()) assert_almost_equal(np.abs(Yt[0][0]), 1., 1) def test_randomized_pca_check_list(): # Test that the projection by RandomizedPCA on list data is correct X = [[1.0, 0.0], [0.0, 1.0]] X_transformed = RandomizedPCA(n_components=1, random_state=0).fit(X).transform(X) assert_equal(X_transformed.shape, (2, 1)) assert_almost_equal(X_transformed.mean(), 0.00, 2) assert_almost_equal(X_transformed.std(), 0.71, 2) def test_randomized_pca_inverse(): # Test that RandomizedPCA is inversible on dense data rng = np.random.RandomState(0) n, p = 50, 3 X = rng.randn(n, p) # spherical data X[:, 1] *= .00001 # make middle component relatively small X += [5, 4, 3] # make a large mean # same check that we can find the original data from the transformed signal # (since the data is almost of rank n_components) pca = RandomizedPCA(n_components=2, random_state=0).fit(X) Y = pca.transform(X) Y_inverse = pca.inverse_transform(Y) assert_almost_equal(X, Y_inverse, decimal=2) # same as above with whitening (approximate reconstruction) pca = RandomizedPCA(n_components=2, 
whiten=True, random_state=0).fit(X) Y = pca.transform(X) Y_inverse = pca.inverse_transform(Y) relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max() assert_almost_equal(relative_max_delta, 0.11, decimal=2) def test_pca_dim(): # Check automated dimensionality setting rng = np.random.RandomState(0) n, p = 100, 5 X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5, 1, 2]) pca = PCA(n_components='mle').fit(X) assert_equal(pca.n_components, 'mle') assert_equal(pca.n_components_, 1) def test_infer_dim_1(): # TODO: explain what this is testing # Or at least use explicit variable names... n, p = 1000, 5 rng = np.random.RandomState(0) X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) + np.array([1, 0, 7, 4, 6])) pca = PCA(n_components=p) pca.fit(X) spect = pca.explained_variance_ ll = [] for k in range(p): ll.append(_assess_dimension_(spect, k, n, p)) ll = np.array(ll) assert_greater(ll[1], ll.max() - .01 * n) def test_infer_dim_2(): # TODO: explain what this is testing # Or at least use explicit variable names... n, p = 1000, 5 rng = np.random.RandomState(0) X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5, 1, 2]) X[10:20] += np.array([6, 0, 7, 2, -1]) pca = PCA(n_components=p) pca.fit(X) spect = pca.explained_variance_ assert_greater(_infer_dimension_(spect, n, p), 1) def test_infer_dim_3(): n, p = 100, 5 rng = np.random.RandomState(0) X = rng.randn(n, p) * .1 X[:10] += np.array([3, 4, 5, 1, 2]) X[10:20] += np.array([6, 0, 7, 2, -1]) X[30:40] += 2 * np.array([-1, 1, -1, 1, -1]) pca = PCA(n_components=p) pca.fit(X) spect = pca.explained_variance_ assert_greater(_infer_dimension_(spect, n, p), 2) def test_infer_dim_by_explained_variance(): X = iris.data pca = PCA(n_components=0.95) pca.fit(X) assert_equal(pca.n_components, 0.95) assert_equal(pca.n_components_, 2) pca = PCA(n_components=0.01) pca.fit(X) assert_equal(pca.n_components, 0.01) assert_equal(pca.n_components_, 1) rng = np.random.RandomState(0) # more features than samples X = rng.rand(5, 20) pca = PCA(n_components=.5).fit(X) assert_equal(pca.n_components, 0.5) assert_equal(pca.n_components_, 2) def test_pca_score(): # Test that probabilistic PCA scoring yields a reasonable score n, p = 1000, 3 rng = np.random.RandomState(0) X = rng.randn(n, p) * .1 + np.array([3, 4, 5]) pca = PCA(n_components=2) pca.fit(X) ll1 = pca.score(X) h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p np.testing.assert_almost_equal(ll1 / h, 1, 0) def test_pca_score2(): # Test that probabilistic PCA correctly separated different datasets n, p = 100, 3 rng = np.random.RandomState(0) X = rng.randn(n, p) * .1 + np.array([3, 4, 5]) pca = PCA(n_components=2) pca.fit(X) ll1 = pca.score(X) ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5])) assert_greater(ll1, ll2) # Test that it gives the same scores if whiten=True pca = PCA(n_components=2, whiten=True) pca.fit(X) ll2 = pca.score(X) assert_almost_equal(ll1, ll2) def test_pca_score3(): # Check that probabilistic PCA selects the right model n, p = 200, 3 rng = np.random.RandomState(0) Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])) Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])) ll = np.zeros(p) for k in range(p): pca = PCA(n_components=k) pca.fit(Xl) ll[k] = pca.score(Xt) assert_true(ll.argmax() == 1)
bsd-3-clause
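The PCA tests above check, among other things, that `fit(X).transform(X)` matches `fit_transform(X)` and that whitening yields roughly unit-variance components. A minimal sketch of those two properties, assuming only scikit-learn's public `PCA` class (the `RandomizedPCA` estimator used in the tests is deprecated in later releases):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 10)

# fit + transform agrees with fit_transform on the same data.
X_r = PCA(n_components=3).fit(X).transform(X)
X_r2 = PCA(n_components=3).fit_transform(X)
print(np.allclose(X_r, X_r2, atol=1e-8))

# With whiten=True each projected component has roughly unit variance.
X_w = PCA(n_components=3, whiten=True).fit_transform(X)
print(X_w.std(axis=0))   # each value close to 1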
vene/ambra
ambra/cross_validation.py
1
9371
import numbers import time import numpy as np from sklearn.utils import safe_indexing from sklearn.base import is_classifier, clone from sklearn.metrics.scorer import check_scoring from sklearn.externals.joblib import Parallel, delayed, logger from ambra.backports import _num_samples, indexable from sklearn.cross_validation import check_cv def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer, **params): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test, **params) else: score = scorer(estimator, X_test, y_test, **params) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', scorer_params=None): """Evaluate a score by cross-validation Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' scorer_params : dict, optional Parameters to pass to the scorer. Can be used for sample weights and sample groups. Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params, scorer_params) for train, test in cv) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, scorer_params, return_train_score=False, return_parameters=False): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like or None The target variable to try to predict in the case of supervised learning. scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. scorer_params : dict or None Parameters that will be passed to the scorer. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust lenght of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in fit_params.items()]) # Same, but take both slices scorer_params = scorer_params if scorer_params is not None else {} train_scorer_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) test_scorer_params = dict([(k, np.asarray(v)[test] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) test_score = _score(estimator, X_test, y_test, scorer, **test_scorer_params) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer, **train_scorer_params) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret
bsd-2-clause
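The `cross_val_score`/`_fit_and_score` variant above differs from stock scikit-learn mainly in forwarding `scorer_params` and slicing any array-valued entry per fold. The sketch below isolates just that slicing rule with a hypothetical helper name (`slice_params`) and made-up data, to show what the train and test scorer parameters end up containing:

import numpy as np

def slice_params(params, indices, n_samples):
    """Hypothetical helper mirroring the per-fold slicing in _fit_and_score:
    array-like values of length n_samples are indexed by the fold's indices,
    everything else is passed through unchanged."""
    return {k: (np.asarray(v)[indices]
                if hasattr(v, '__len__') and len(v) == n_samples else v)
            for k, v in params.items()}

n_samples = 6
scorer_params = {'sample_weight': np.arange(n_samples), 'beta': 0.5}
train, test = np.array([0, 1, 2, 3]), np.array([4, 5])
print(slice_params(scorer_params, train, n_samples))
# {'sample_weight': array([0, 1, 2, 3]), 'beta': 0.5}
print(slice_params(scorer_params, test, n_samples))
# {'sample_weight': array([4, 5]), 'beta': 0.5}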
ZenDevelopmentSystems/scikit-learn
benchmarks/bench_lasso.py
297
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
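A single iteration of the benchmark above boils down to timing `Lasso` against `LassoLars` on the same column-normalized regression problem. A reduced sketch, using the current import path `sklearn.datasets.make_regression` and omitting the `normalize`/`precompute` switches whose spellings have changed across scikit-learn releases:

import gc
from time import time

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LassoLars

X, y = make_regression(n_samples=500, n_features=50, n_informative=5,
                       noise=0.1, random_state=0)
X /= np.sqrt(np.sum(X ** 2, axis=0))  # normalize columns as in compute_bench

for name, clf in [('Lasso', Lasso(alpha=0.01, fit_intercept=False)),
                  ('LassoLars', LassoLars(alpha=0.01, fit_intercept=False))]:
    gc.collect()
    tstart = time()
    clf.fit(X, y)
    print('%s: %.4f s' % (name, time() - tstart))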
eric-haibin-lin/mxnet
example/named_entity_recognition/src/ner.py
4
12663
# !/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # -*- coding: utf-8 -*- from collections import Counter import itertools import iterators import os import numpy as np import pandas as pd import mxnet as mx import argparse import pickle import logging logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--data-dir', type=str, default='../data', help='relative path to input data') parser.add_argument('--output-dir', type=str, default='../results', help='directory to save model files to') parser.add_argument('--max-records', type=int, default=None, help='total records before data split') parser.add_argument('--train_fraction', type=float, default=0.8, help='fraction of data to use for training. remainder used for testing.') parser.add_argument('--batch-size', type=int, default=128, help='the batch size.') parser.add_argument('--buckets', type=str, default="", help='unique bucket sizes') parser.add_argument('--char-embed', type=int, default=25, help='Embedding size for each unique character.') parser.add_argument('--char-filter-list', type=str, default="3,4,5", help='unique filter sizes for char level cnn') parser.add_argument('--char-filters', type=int, default=20, help='number of each filter size') parser.add_argument('--word-embed', type=int, default=500, help='Embedding size for each unique character.') parser.add_argument('--word-filter-list', type=str, default="3,4,5", help='unique filter sizes for char level cnn') parser.add_argument('--word-filters', type=int, default=200, help='number of each filter size') parser.add_argument('--lstm-state-size', type=int, default=100, help='number of hidden units in each unrolled recurrent cell') parser.add_argument('--lstm-layers', type=int, default=1, help='number of recurrent layers') parser.add_argument('--gpus', type=str, default='', help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. 
') parser.add_argument('--optimizer', type=str, default='adam', help='the optimizer type') parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate') parser.add_argument('--dropout', type=float, default=0.2, help='dropout rate for network') parser.add_argument('--num-epochs', type=int, default=100, help='max num of epochs') parser.add_argument('--save-period', type=int, default=20, help='save checkpoint for every n epochs') parser.add_argument('--model_prefix', type=str, default='electricity_model', help='prefix for saving model params') def save_obj(obj, name): with open(name + '.pkl', 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def save_model(): if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) return mx.callback.do_checkpoint(os.path.join(args.output_dir, "checkpoint"), args.save_period) def build_vocab(nested_list): """ :param nested_list: list of list of string :return: dictionary mapping from string to int, inverse of that dictionary """ # Build vocabulary word_counts = Counter(itertools.chain(*nested_list)) logging.info("build_vocab: word_counts=%d" % (len(word_counts))) # Mapping from index to label vocabulary_inv = [x[0] for x in word_counts.most_common()] # Mapping from label to index vocabulary = {x: i for i, x in enumerate(vocabulary_inv)} return vocabulary, vocabulary_inv def build_iters(data_dir, max_records, train_fraction, batch_size, buckets=None): """ Reads a csv of sentences/tag sequences into a pandas dataframe. Converts into X = array(list(int)) & Y = array(list(int)) Splits into training and test sets Builds dictionaries mapping from index labels to labels/ indexed features to features :param data_dir: directory to read in csv data from :param max_records: total number of records to randomly select from input data :param train_fraction: fraction of the data to use for training :param batch_size: records in mini-batches during training :param buckets: size of each bucket in the iterators :return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos """ # Read in data as numpy array df = pd.read_pickle(os.path.join(data_dir, "ner_data.pkl"))[:max_records] # Get feature lists entities=[list(array) for array in df["BILOU_tag"].values] sentences = [list(array) for array in df["token"].values] chars=[[[c for c in word] for word in sentence] for sentence in sentences] # Build vocabularies entity_to_index, index_to_entity = build_vocab(entities) word_to_index, index_to_word = build_vocab(sentences) char_to_index, index_to_char = build_vocab([np.array([c for c in word]) for word in index_to_word]) save_obj(entity_to_index, os.path.join(args.data_dir, "tag_to_index")) # Map strings to integer values indexed_entities=[list(map(entity_to_index.get, l)) for l in entities] indexed_tokens=[list(map(word_to_index.get, l)) for l in sentences] indexed_chars=[[list(map(char_to_index.get, word)) for word in sentence] for sentence in chars] # Split into training and testing data idx=int(len(indexed_tokens)*train_fraction) logging.info("Preparing train/test datasets splitting at idx %d on total %d sentences using a batchsize of %d", idx, len(indexed_tokens), batch_size) X_token_train, X_char_train, Y_train = indexed_tokens[:idx], indexed_chars[:idx], indexed_entities[:idx] X_token_test, X_char_test, Y_test = indexed_tokens[idx:], indexed_chars[idx:], indexed_entities[idx:] # build iterators to feed batches to network train_iter = iterators.BucketNerIter(sentences=X_token_train, characters=X_char_train, 

label=Y_train, max_token_chars=5, batch_size=batch_size, buckets=buckets) logging.info("Creating the val_iter using %d sentences", len(X_token_test)) val_iter = iterators.BucketNerIter(sentences=X_token_test, characters=X_char_test, label=Y_test, max_token_chars=train_iter.max_token_chars, batch_size=batch_size, buckets=train_iter.buckets) return train_iter, val_iter, word_to_index, char_to_index, entity_to_index def sym_gen(seq_len): """ Build NN symbol depending on the length of the input sequence """ sentence_shape = train_iter.provide_data[0][1] char_sentence_shape = train_iter.provide_data[1][1] entities_shape = train_iter.provide_label[0][1] X_sent = mx.symbol.Variable(train_iter.provide_data[0].name) X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name) Y = mx.sym.Variable(train_iter.provide_label[0].name) ############################### # Character embedding component ############################### char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed') char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2') char_cnn_outputs = [] for i, filter_size in enumerate(args.char_filter_list): # Kernel that slides over entire words resulting in a 1d output convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1), num_filter=args.char_filters, name="char_conv_layer_" + str(i)) acti = mx.sym.Activation(data=convi, act_type='tanh') pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1), stride=(1, 1, 1), name="char_pool_layer_" + str(i)) pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i)) char_cnn_outputs.append(pooli) # combine features from all filters & apply dropout cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features") regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training', name='regularized charCnn features') ################################## # Combine char and word embeddings ################################## word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed') rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input') ############################## # Bidirectional LSTM component ############################## # unroll the lstm cell in time, merging outputs bi_cell.reset() output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True) # Map to num entity classes rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output') fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer') # reshape back to same shape as loss will be reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1)) sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax') return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label] def train(train_iter, val_iter): import metrics devs = mx.cpu() if args.gpus is None or args.gpus is '' else [mx.gpu(int(i)) for i in args.gpus.split(',')] logging.info("train on device %s using optimizer %s at learningrate %f for %d epochs using %d 
records: lstm_state_size=%d ...", devs, args.optimizer, args.lr, args.num_epochs, args.max_records, args.lstm_state_size) module = mx.mod.BucketingModule(sym_gen, train_iter.default_bucket_key, context=devs) module.fit(train_data=train_iter, eval_data=val_iter, eval_metric=metrics.composite_classifier_metrics(), optimizer=args.optimizer, optimizer_params={'learning_rate': args.lr }, initializer=mx.initializer.Uniform(0.1), num_epoch=args.num_epochs, epoch_end_callback=save_model()) if __name__ == '__main__': # parse args args = parser.parse_args() args.buckets = list(map(int, args.buckets.split(','))) if len(args.buckets) > 0 else None args.char_filter_list = list(map(int, args.char_filter_list.split(','))) # Build data iterators train_iter, val_iter, word_to_index, char_to_index, entity_to_index = build_iters(args.data_dir, args.max_records, args.train_fraction, args.batch_size, args.buckets) logging.info("validation iterator: %s", val_iter) # Define the recurrent layer bi_cell = mx.rnn.SequentialRNNCell() for layer_num in range(args.lstm_layers): bi_cell.add(mx.rnn.BidirectionalCell( mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="forward_layer_" + str(layer_num)), mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="backward_layer_" + str(layer_num)))) bi_cell.add(mx.rnn.DropoutCell(args.dropout)) train(train_iter, val_iter)
apache-2.0
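Most of the MXNet record above is model plumbing; the data preparation hinges on `build_vocab`, which ranks tokens by frequency and maps them to integer ids. A self-contained sketch of that mapping on toy sentences and tags (all values made up), independent of MXNet:

from collections import Counter
import itertools

def build_vocab(nested_list):
    # Same idea as build_vocab in the record above: tokens are ranked by
    # frequency and the most frequent token receives index 0.
    word_counts = Counter(itertools.chain(*nested_list))
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return vocabulary, vocabulary_inv

sentences = [['John', 'lives', 'in', 'London'],
             ['Mary', 'lives', 'in', 'Paris']]
tags = [['B-PER', 'O', 'O', 'B-LOC'],
        ['B-PER', 'O', 'O', 'B-LOC']]

word_to_index, index_to_word = build_vocab(sentences)
tag_to_index, index_to_tag = build_vocab(tags)
indexed_tokens = [[word_to_index[w] for w in s] for s in sentences]
indexed_tags = [[tag_to_index[t] for t in s] for s in tags]
print(word_to_index)   # {'lives': 0, 'in': 1, 'John': 2, ...}
print(indexed_tokens)  # [[2, 0, 1, 3], [4, 0, 1, 5]]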
googleinterns/cabby
cabby/model/datasets.py
1
4391
# coding=utf-8 # Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl import logging import os import pandas as pd from sklearn.utils import shuffle from cabby.geo import regions from cabby.geo import util as gutil class RUNDataset: def __init__(self, data_dir: str, s2level: int, lines: bool = False): train_ds, valid_ds, test_ds, ds = self.load_data(data_dir, lines=lines) # Get labels. map_1 = regions.get_region("RUN-map1") map_2 = regions.get_region("RUN-map2") map_3 = regions.get_region("RUN-map3") logging.info(map_1.polygon.wkt) logging.info(map_2.polygon.wkt) logging.info(map_3.polygon.wkt) unique_cellid_map_1 = gutil.cellids_from_polygon(map_1.polygon, s2level) unique_cellid_map_2 = gutil.cellids_from_polygon(map_2.polygon, s2level) unique_cellid_map_3 = gutil.cellids_from_polygon(map_3.polygon, s2level) unique_cellid = ( unique_cellid_map_1 + unique_cellid_map_2 + unique_cellid_map_3) label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)} cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)} self.train = train_ds self.valid = valid_ds self.test = test_ds self.ds = ds self.unique_cellid = unique_cellid self.label_to_cellid = label_to_cellid self.cellid_to_label = cellid_to_label def load_data(self, data_dir: str, lines: bool): ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines) ds['instructions'] = ds.groupby( ['id'])['instruction'].transform(lambda x: ' '.join(x)) ds = ds.drop_duplicates(subset='id', keep="last") columns_keep = ds.columns.difference( ['map', 'id', 'instructions', 'end_point', 'start_point']) ds.drop(columns_keep, 1, inplace=True) ds = shuffle(ds) ds.reset_index(inplace=True, drop=True) dataset_size = ds.shape[0] logging.info(f"Size of dataset: {ds.shape[0]}") train_size = round(dataset_size * 80 / 100) valid_size = round(dataset_size * 10 / 100) train_ds = ds.iloc[:train_size] valid_ds = ds.iloc[train_size:train_size + valid_size] test_ds = ds.iloc[train_size + valid_size:] return train_ds, valid_ds, test_ds, ds class RVSDataset: def __init__(self, data_dir: str, s2level: int, region: str, lines: bool = True): ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines) logging.info(f"Size of dataset before removal of duplication: {ds.shape[0]}") ds = pd.concat([ds.drop(['geo_landmarks'], axis=1), ds['geo_landmarks'].apply(pd.Series)], axis=1) lengths = ds.end_point.apply(lambda x: x if len(x) == 3 else "").tolist() ds['end_osmid'] = ds.end_point.apply(lambda x: x[1]) ds['start_osmid'] = ds.start_point.apply(lambda x: x[1]) ds['end_pivot'] = ds.end_point ds['end_point'] = ds.end_point.apply(lambda x: x[3]) ds['start_point'] = ds.start_point.apply(lambda x: x[3]) ds = ds.drop_duplicates(subset=['end_osmid', 'start_osmid'], keep='last') logging.info(f"Size of dataset after removal of duplication: {ds.shape[0]}") dataset_size = ds.shape[0] train_size = round(dataset_size * 80 / 100) valid_size = round(dataset_size * 10 / 100) train_ds = ds.iloc[:train_size] valid_ds = 
ds.iloc[train_size:train_size + valid_size]
    test_ds = ds.iloc[train_size + valid_size:]

    # Get labels.
    active_region = regions.get_region(region)
    unique_cellid = gutil.cellids_from_polygon(active_region.polygon, s2level)
    label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
    cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}

    self.train = train_ds
    self.valid = valid_ds
    self.test = test_ds
    self.unique_cellid = unique_cellid
    self.label_to_cellid = label_to_cellid
    self.cellid_to_label = cellid_to_label
apache-2.0
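Both dataset classes above finish with the same 80/10/10 split of a shuffled DataFrame. A minimal sketch of just that split on a toy frame (the column names are placeholders, not the real schema):

import pandas as pd
from sklearn.utils import shuffle

# Toy frame standing in for the instruction dataset.
ds = pd.DataFrame({'instructions': ['go north'] * 10,
                   'end_point': [(0.0, 0.0)] * 10,
                   'start_point': [(1.0, 1.0)] * 10})

ds = shuffle(ds, random_state=0).reset_index(drop=True)
dataset_size = ds.shape[0]
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)

train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
print(len(train_ds), len(valid_ds), len(test_ds))  # 8 1 1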
BiaDarkia/scikit-learn
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
33
4174
""" ======================================== Label Propagation digits active learning ======================================== Demonstrates an active learning technique to learn handwritten digits using label propagation. We start by training a label propagation model with only 10 labeled points, then we select the top five most uncertain points to label. Next, we train with 15 labeled points (original 10 + 5 new ones). We repeat this process four times to have a model trained with 30 labeled examples. Note you can increase this to label more than 30 by changing `max_iterations`. Labeling more than 30 can be useful to get a sense for the speed of convergence of this active learning technique. A plot will appear showing the top 5 most uncertain digits for each iteration of training. These may or may not contain mistakes, but we will train the next model with their true labels. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # License: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import label_propagation from sklearn.metrics import classification_report, confusion_matrix digits = datasets.load_digits() rng = np.random.RandomState(0) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:330]] y = digits.target[indices[:330]] images = digits.images[indices[:330]] n_total_samples = len(y) n_labeled_points = 10 max_iterations = 5 unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:] f = plt.figure() for i in range(max_iterations): if len(unlabeled_indices) == 0: print("No unlabeled items left to label.") break y_train = np.copy(y) y_train[unlabeled_indices] = -1 lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_indices] true_labels = y[unlabeled_indices] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print("Iteration %i %s" % (i, 70 * "_")) print("Label Spreading model: %d labeled & %d unlabeled (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # compute the entropies of transduced label distributions pred_entropies = stats.distributions.entropy( lp_model.label_distributions_.T) # select up to 5 digit examples that the classifier is most uncertain about uncertainty_index = np.argsort(pred_entropies)[::-1] uncertainty_index = uncertainty_index[ np.in1d(uncertainty_index, unlabeled_indices)][:5] # keep track of indices that we get labels for delete_indices = np.array([]) # for more than 5 iterations, visualize the gain only on the first 5 if i < 5: f.text(.05, (1 - (i + 1) * .183), "model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10) for index, image_index in enumerate(uncertainty_index): image = images[image_index] # for more than 5 iterations, visualize the gain only on the first 5 if i < 5: sub = f.add_subplot(5, 5, index + 1 + (5 * i)) sub.imshow(image, cmap=plt.cm.gray_r, interpolation='none') sub.set_title("predict: %i\ntrue: %i" % ( lp_model.transduction_[image_index], y[image_index]), size=10) sub.axis('off') # labeling 5 points, remote from labeled set delete_index, = np.where(unlabeled_indices == image_index) delete_indices = np.concatenate((delete_indices, delete_index)) unlabeled_indices = np.delete(unlabeled_indices, delete_indices) n_labeled_points += 
len(uncertainty_index)

f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
           "uncertain labels to learn with the next model.", y=1.15)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9,
                    wspace=0.2, hspace=0.85)
plt.show()
bsd-3-clause
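The active-learning loop above ranks unlabeled points by the entropy of their transduced label distributions and queries the five most uncertain. A small sketch of that selection step on a made-up distribution matrix (in the example the matrix comes from `lp_model.label_distributions_`):

import numpy as np
from scipy import stats

# Made-up transduced label distributions for 6 unlabeled points over
# 3 classes (rows sum to 1).
label_distributions = np.array([[0.98, 0.01, 0.01],
                                [0.34, 0.33, 0.33],
                                [0.70, 0.20, 0.10],
                                [0.50, 0.49, 0.01],
                                [0.90, 0.05, 0.05],
                                [0.40, 0.35, 0.25]])

# entropy() works column-wise, hence the transpose (the example calls the
# same function via stats.distributions.entropy).
pred_entropies = stats.entropy(label_distributions.T)

# indices of the five most uncertain points, most uncertain first
uncertainty_index = np.argsort(pred_entropies)[::-1][:5]
print(uncertainty_index)  # [1 5 2 3 4]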
tmhm/scikit-learn
examples/plot_kernel_approximation.py
262
8004
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please note that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2] # Now predict the value of the digit on the second half: data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:] #data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=.2) linear_svm = svm.LinearSVC() # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=.2, random_state=1) feature_map_nystroem = Nystroem(gamma=.2, random_state=1) fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]) nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: plt.figure(figsize=(8, 8)) accuracy = plt.subplot(211) # second y axis for timeings timescale = plt.subplot(212) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, '--', label='Nystroem approx. kernel') accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, '--', label='Fourier approx. 
kernel') # horizontal lines for exact rbf and linear kernels: accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], '--', label='linear svm') accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], '--', label='rbf svm') # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc='best') timescale.legend(loc='best') # visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8).fit(data_train) X = pca.transform(data_train) # Gemerate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = ['SVC with rbf kernel', 'SVC (linear kernel)\n with Fourier rbf feature map\n' 'n_components=100', 'SVC (linear kernel)\n with Nystroem rbf feature map\n' 'n_components=100'] plt.tight_layout() plt.figure(figsize=(12, 5)) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired) plt.title(titles[i]) plt.tight_layout() plt.show()
bsd-3-clause
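A minimal, self-contained sketch (not part of the record above) of the pattern that example benchmarks: an explicit RBFSampler feature map feeding a LinearSVC inside a Pipeline. The gamma and n_components values here are illustrative choices, not taken from the record.

# Hedged sketch: RBF-kernel approximation via random Fourier features.
# gamma=0.2 and n_components=300 are illustrative, not tuned values.
import numpy as np
from sklearn.datasets import load_digits
from sklearn.kernel_approximation import RBFSampler
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

digits = load_digits()
X = digits.data / 16.          # scale pixel counts to [0, 1]
y = digits.target
n = len(X) // 2                # train on the first half, test on the second

approx_svm = Pipeline([("feature_map", RBFSampler(gamma=0.2, n_components=300,
                                                  random_state=1)),
                       ("svm", LinearSVC())])
approx_svm.fit(X[:n], y[:n])
print(approx_svm.score(X[n:], y[n:]))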
loganlinn/mlia
resources/Ch10/kMeans.py
3
6419
''' Created on Feb 16, 2011 k Means Clustering for Ch10 of Machine Learning in Action @author: Peter Harrington ''' from numpy import * def loadDataSet(fileName): #general function to parse tab -delimited floats dataMat = [] #assume last column is target value fr = open(fileName) for line in fr.readlines(): curLine = line.strip().split('\t') fltLine = map(float,curLine) #map all elements to float() dataMat.append(fltLine) return dataMat def distEclud(vecA, vecB): return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB) def randCent(dataSet, k): n = shape(dataSet)[1] centroids = mat(zeros((k,n)))#create centroid mat for j in range(n):#create random cluster centers, within bounds of each dimension minJ = min(dataSet[:,j]) rangeJ = float(max(dataSet[:,j]) - minJ) centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1)) return centroids def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent): m = shape(dataSet)[0] clusterAssment = mat(zeros((m,2)))#create mat to assign data points #to a centroid, also holds SE of each point centroids = createCent(dataSet, k) clusterChanged = True while clusterChanged: clusterChanged = False for i in range(m):#for each data point assign it to the closest centroid minDist = inf; minIndex = -1 for j in range(k): distJI = distMeas(centroids[j,:],dataSet[i,:]) if distJI < minDist: minDist = distJI; minIndex = j if clusterAssment[i,0] != minIndex: clusterChanged = True clusterAssment[i,:] = minIndex,minDist**2 print centroids for cent in range(k):#recalculate centroids ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]#get all the point in this cluster centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean return centroids, clusterAssment def biKmeans(dataSet, k, distMeas=distEclud): m = shape(dataSet)[0] clusterAssment = mat(zeros((m,2))) centroid0 = mean(dataSet, axis=0).tolist()[0] centList =[centroid0] #create a list with one centroid for j in range(m):#calc initial Error clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2 while (len(centList) < k): lowestSSE = inf for i in range(len(centList)): ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas) sseSplit = sum(splitClustAss[:,1])#compare the SSE to the currrent minimum sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1]) print "sseSplit, and notSplit: ",sseSplit,sseNotSplit if (sseSplit + sseNotSplit) < lowestSSE: bestCentToSplit = i bestNewCents = centroidMat bestClustAss = splitClustAss.copy() lowestSSE = sseSplit + sseNotSplit bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit print 'the bestCentToSplit is: ',bestCentToSplit print 'the len of bestClustAss is: ', len(bestClustAss) centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids centList.append(bestNewCents[1,:].tolist()[0]) clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE return mat(centList), clusterAssment import urllib import json def geoGrab(stAddress, city): apiStem = 'http://where.yahooapis.com/geocode?' 
#create a dict and constants for the goecoder params = {} params['flags'] = 'J'#JSON return type params['appid'] = 'aaa0VN6k' params['location'] = '%s %s' % (stAddress, city) url_params = urllib.urlencode(params) yahooApi = apiStem + url_params #print url_params print yahooApi c=urllib.urlopen(yahooApi) return json.loads(c.read()) from time import sleep def massPlaceFind(fileName): fw = open('places.txt', 'w') for line in open(fileName).readlines(): line = line.strip() lineArr = line.split('\t') retDict = geoGrab(lineArr[1], lineArr[2]) if retDict['ResultSet']['Error'] == 0: lat = float(retDict['ResultSet']['Results'][0]['latitude']) lng = float(retDict['ResultSet']['Results'][0]['longitude']) print "%s\t%f\t%f" % (lineArr[0], lat, lng) fw.write('%s\t%f\t%f\n' % (line, lat, lng)) else: print "error fetching" sleep(1) fw.close() def distSLC(vecA, vecB):#Spherical Law of Cosines a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180) b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \ cos(pi * (vecB[0,0]-vecA[0,0]) /180) return arccos(a + b)*6371.0 #pi is imported with numpy import matplotlib import matplotlib.pyplot as plt def clusterClubs(numClust=5): datList = [] for line in open('places.txt').readlines(): lineArr = line.split('\t') datList.append([float(lineArr[4]), float(lineArr[3])]) datMat = mat(datList) myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC) fig = plt.figure() rect=[0.1,0.1,0.8,0.8] scatterMarkers=['s', 'o', '^', '8', 'p', \ 'd', 'v', 'h', '>', '<'] axprops = dict(xticks=[], yticks=[]) ax0=fig.add_axes(rect, label='ax0', **axprops) imgP = plt.imread('Portland.png') ax0.imshow(imgP) ax1=fig.add_axes(rect, label='ax1', frameon=False) for i in range(numClust): ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:] markerStyle = scatterMarkers[i % len(scatterMarkers)] ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90) ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300) plt.show()
epl-1.0
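A short usage sketch for the kMeans module above; it assumes the file is importable as kMeans and is run under Python 2 (its print statements require that). The toy data, two well-separated Gaussian blobs, are illustrative.

from numpy import mat, random, vstack
import kMeans  # the module listed above, assumed importable

# two well-separated 2-D blobs
data = mat(vstack([random.randn(50, 2) + 5,
                   random.randn(50, 2) - 5]))

centroids, assignments = kMeans.kMeans(data, 2)
print(centroids)        # one centroid near (5, 5), one near (-5, -5)
print(assignments[:5])  # per point: [assigned cluster index, squared distance]

# bisecting k-means from the same module
centroids2, assignments2 = kMeans.biKmeans(data, 2)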
YoungKwonJo/mlxtend
tests/tests_evaluate/test_learning_curves.py
1
2212
from mlxtend.evaluate import plot_learning_curves
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
import numpy as np


def test_training_size():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.6, random_state=2)

    clf = DecisionTreeClassifier(max_depth=1, random_state=1)

    training_errors, test_errors = plot_learning_curves(
        X_train, y_train, X_test, y_test, clf,
        kind='training_size', suppress_plot=True)

    desired1 = [0.32, 0.33, 0.32, 0.33, 0.30, 0.31, 0.31, 0.22, 0.22, 0.22]
    desired2 = [0.35, 0.35, 0.35, 0.35, 0.43, 0.45, 0.35, 0.35, 0.45, 0.45]

    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2)


def test_scikit_metrics():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.6, random_state=2)

    clf = DecisionTreeClassifier(max_depth=1, random_state=1)

    training_errors, test_errors = plot_learning_curves(
        X_train, y_train, X_test, y_test, clf,
        kind='training_size', suppress_plot=True, scoring='accuracy')

    desired1 = [0.68, 0.67, 0.68, 0.67, 0.7, 0.69, 0.69, 0.78, 0.78, 0.78]
    desired2 = [0.65, 0.65, 0.65, 0.65, 0.57, 0.55, 0.65, 0.65, 0.55, 0.55]

    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2)


def test_n_features():
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.6, random_state=2)

    clf = DecisionTreeClassifier(max_depth=1, random_state=1)

    training_errors, test_errors = plot_learning_curves(
        X_train, y_train, X_test, y_test, clf,
        kind='n_features', suppress_plot=True)

    desired1 = [0.40, 0.40, 0.32, 0.32]
    desired2 = [0.42, 0.42, 0.35, 0.35]

    np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
    np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
bsd-3-clause
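A minimal usage sketch mirroring the calls these tests make, assuming the same (older) mlxtend API in which plot_learning_curves lives in mlxtend.evaluate and scikit-learn still ships sklearn.cross_validation.

from mlxtend.evaluate import plot_learning_curves
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, train_size=0.6, random_state=2)

clf = DecisionTreeClassifier(max_depth=1, random_state=1)

# plots error vs. training-set size and also returns both error curves
training_errors, test_errors = plot_learning_curves(
    X_train, y_train, X_test, y_test, clf, kind='training_size')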
cactusbin/nyt
matplotlib/lib/matplotlib/tests/test_text.py
2
6893
from __future__ import print_function import numpy as np import matplotlib from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup import matplotlib.pyplot as plt import warnings from nose.tools import with_setup @image_comparison(baseline_images=['font_styles']) def test_font_styles(): from matplotlib import _get_data_path data_path = _get_data_path() def find_matplotlib_font(**kw): prop = FontProperties(**kw) path = findfont(prop, directory=data_path) return FontProperties(fname=path) from matplotlib.font_manager import FontProperties, findfont warnings.filterwarnings('ignore','findfont: Font family \[\'Foo\'\] '+ \ 'not found. Falling back to .', UserWarning, module='matplotlib.font_manager') fig = plt.figure() ax = plt.subplot( 1, 1, 1 ) normalFont = find_matplotlib_font( family = "sans-serif", style = "normal", variant = "normal", size = 14, ) ax.annotate( "Normal Font", (0.1, 0.1), xycoords='axes fraction', fontproperties = normalFont ) boldFont = find_matplotlib_font( family = "Foo", style = "normal", variant = "normal", weight = "bold", stretch = 500, size = 14, ) ax.annotate( "Bold Font", (0.1, 0.2), xycoords='axes fraction', fontproperties = boldFont ) boldItemFont = find_matplotlib_font( family = "sans serif", style = "italic", variant = "normal", weight = 750, stretch = 500, size = 14, ) ax.annotate( "Bold Italic Font", (0.1, 0.3), xycoords='axes fraction', fontproperties = boldItemFont ) lightFont = find_matplotlib_font( family = "sans-serif", style = "normal", variant = "normal", weight = 200, stretch = 500, size = 14, ) ax.annotate( "Light Font", (0.1, 0.4), xycoords='axes fraction', fontproperties = lightFont ) condensedFont = find_matplotlib_font( family = "sans-serif", style = "normal", variant = "normal", weight = 500, stretch = 100, size = 14, ) ax.annotate( "Condensed Font", (0.1, 0.5), xycoords='axes fraction', fontproperties = condensedFont ) ax.set_xticks([]) ax.set_yticks([]) @image_comparison(baseline_images=['multiline']) def test_multiline(): fig = plt.figure() ax = plt.subplot(1, 1, 1) ax.set_title("multiline\ntext alignment") plt.text(0.2, 0.5, "TpTpTp\n$M$\nTpTpTp", size=20, ha="center", va="top") plt.text(0.5, 0.5, "TpTpTp\n$M^{M^{M^{M}}}$\nTpTpTp", size=20, ha="center", va="top") plt.text(0.8, 0.5, "TpTpTp\n$M_{q_{q_{q}}}$\nTpTpTp", size=20, ha="center", va="top") plt.xlim(0, 1) plt.ylim(0, 0.8) ax.set_xticks([]) ax.set_yticks([]) @image_comparison(baseline_images=['antialiased'], extensions=['png']) def test_antialiasing(): matplotlib.rcParams['text.antialiased'] = True fig = plt.figure(figsize=(5.25, 0.75)) fig.text(0.5, 0.75, "antialiased", horizontalalignment='center', verticalalignment='center') fig.text(0.5, 0.25, "$\sqrt{x}$", horizontalalignment='center', verticalalignment='center') # NOTE: We don't need to restore the rcParams here, because the # test cleanup will do it for us. In fact, if we do it here, it # will turn antialiasing back off before the images are actually # rendered. 
def test_afm_kerning(): from matplotlib.afm import AFM from matplotlib.font_manager import findfont fn = findfont("Helvetica", fontext="afm") with open(fn, 'rb') as fh: afm = AFM(fh) assert afm.string_width_height('VAVAVAVAVAVA') == (7174.0, 718) @image_comparison(baseline_images=['text_contains'], extensions=['png']) def test_contains(): import matplotlib.backend_bases as mbackend fig = plt.figure() ax = plt.axes() mevent = mbackend.MouseEvent('button_press_event', fig.canvas, 0.5, 0.5, 1, None) xs = np.linspace(0.25, 0.75, 30) ys = np.linspace(0.25, 0.75, 30) xs, ys = np.meshgrid(xs, ys) txt = plt.text(0.48, 0.52, 'hello world', ha='center', fontsize=30, rotation=30) # uncomment to draw the text's bounding box # txt.set_bbox(dict(edgecolor='black', facecolor='none')) # draw the text. This is important, as the contains method can only work # when a renderer exists. plt.draw() for x, y in zip(xs.flat, ys.flat): mevent.x, mevent.y = plt.gca().transAxes.transform_point([x, y]) contains, _ = txt.contains(mevent) color = 'yellow' if contains else 'red' # capture the viewLim, plot a point, and reset the viewLim vl = ax.viewLim.frozen() ax.plot(x, y, 'o', color=color) ax.viewLim.set(vl) @image_comparison(baseline_images=['titles']) def test_titles(): # left and right side titles fig = plt.figure() ax = plt.subplot(1, 1, 1) ax.set_title("left title", loc="left") ax.set_title("right title", loc="right") ax.set_xticks([]) ax.set_yticks([]) @image_comparison(baseline_images=['text_alignment']) def test_alignment(): fig = plt.figure() ax = plt.subplot(1, 1, 1) x = 0.1 for rotation in (0, 30): for alignment in ('top', 'bottom', 'baseline', 'center'): ax.text(x, 0.5, alignment + " Tj", va=alignment, rotation=rotation, bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5)) ax.text(x, 1.0, r'$\sum_{i=0}^{j}$', va=alignment, rotation=rotation) x += 0.1 ax.plot([0, 1], [0.5, 0.5]) ax.plot([0, 1], [1.0, 1.0]) ax.set_xlim([0, 1]) ax.set_ylim([0, 1.5]) ax.set_xticks([]) ax.set_yticks([])
unlicense
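A small stand-alone sketch of the vertical-alignment behaviour that test_alignment above pins down via image comparison: the same string anchored at y=0.5 with different va settings. The coordinates are illustrative.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0.5, 0.5])  # reference line the text is anchored to
for x, va in zip((0.1, 0.35, 0.6, 0.85),
                 ('top', 'bottom', 'baseline', 'center')):
    ax.text(x, 0.5, va + " Tj", va=va, rotation=30,
            bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.show()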
sumitsourabh/opencog
opencog/python/utility/functions.py
34
11056
from math import fabs, isnan from datetime import datetime from spatiotemporal.unix_time import UnixTime from utility.generic import convert_dict_to_sorted_lists from utility.numeric.globals import EPSILON from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY from scipy.integrate import quad __author__ = 'keyvan' def integral(function, start, end): if hasattr(function, 'integral'): return function.integral(start, end) area, error = quad(function, start, end) return area def almost_equals(a, b, epsilon=EPSILON): if fabs(a - b) < epsilon: return True return False def invoke_method_on(method, sequence_or_point): if method is None: return None if not callable(method): raise TypeError("'method' is not callable") result = [] try: for point in sequence_or_point: if type(point) is datetime: point = UnixTime(point) result.append(method(point)) except TypeError: if type(sequence_or_point) is datetime: sequence_or_point = UnixTime(sequence_or_point) return method(sequence_or_point) return result def index_of_first_local_maximum(sequence): first_time = True index = 0 for element in sequence: if first_time: previous = element first_time = False continue if element <= previous: return index previous = element index += 1 return None class Function(object): _domain = None _range = None _function_undefined = None def __init__(self, function_undefined=None, domain=None): if function_undefined is not None: self.function_undefined = function_undefined if domain is not None: if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'): raise TypeError("'domain' should be iterable and support indexing") self._domain = domain def call_on_single_point(self, x): """ to override, __call__ invokes this to handle both points and sequences """ return 0 def derivative(self, point): return None def _check_domain_for(self, feature_name): if self.domain is None: raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format( self.__class__.__name__, feature_name)) def plot(self, plt=None): self._check_domain_for('plotting') if plt is None: import matplotlib.pyplot as plt plt.plot(self.domain, self.range) return plt @property def function_undefined(self): return self._function_undefined @function_undefined.setter def function_undefined(self, value): if value is not None and not isinstance(value, Function): raise TypeError("'function_undefined' should be of type 'Function'") self._function_undefined = value @property def domain(self): return self._domain @property def range(self): return self() def __call__(self, x=None): if x is None: self._check_domain_for("call with 'None'") x = self.domain return invoke_method_on(self.call_on_single_point, x) def __getitem__(self, index): self._check_domain_for('indexing') return self.range[index] def __len__(self): self._check_domain_for('len()') return len(self.range) def __iter__(self): self._check_domain_for('iter()') return iter(self.range) def __reversed__(self): self._check_domain_for('reversed()') return reversed(self.range) class FunctionLinear(Function): def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None): #(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)]) if (a, b) == (None, None): a = (float(y_1) - y_0) / (x_1 - x_0) b = y_0 - a * x_0 if isnan(a) or isnan(b): pass self.a = a self.b = b def call_on_single_point(self, x): return float(self.a * x + self.b) def intersect(self, other): if almost_equals(self.a, other.a): return None x = (float(other.b) - self.b) / (self.a - other.a) return x, 
self(x) def integral(self, start, end): if start >= end: return 0 if self.a == 0: return self.b * (end - start) x_intercept = self.x_intercept if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept): return (self(start) + self(end)) * (end - start) / 2.0 minus_triangle = (x_intercept - start) * self(start) plus_triangle = (end - x_intercept) * self(end) return minus_triangle + plus_triangle def derivative(self, point): return self.a @property def x_intercept(self): return - float(self.b) / self.a @property def y_intercept(self): return self(0) class FunctionHorizontalLinear(FunctionLinear): def __init__(self, y_intercept): FunctionLinear.__init__(self, a=0, b=y_intercept) def call_on_single_point(self, x): return self.b def integral(self, start, end): if start >= end: return 0 if almost_equals(self.b, 0): return 0 return float(self.b) * (end - start) def derivative(self, point): return 0 FUNCTION_ZERO = FunctionHorizontalLinear(0) FUNCTION_ONE = FunctionHorizontalLinear(1) class FunctionComposite(Function): is_normalised = False def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False): if is_normalised is not False: self.is_normalised = True Function.__init__(self, function_undefined=function_undefined, domain=domain) if not isinstance(dictionary_bounds_function, dict): raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) " "tuple keys and values of type 'Function'") self._dictionary_bounds_function = dictionary_bounds_function def call_on_single_point(self, x): for function_bounds in self.dictionary_bounds_function: (a, b) = function_bounds if a <= x: if b >= x: if self.dictionary_bounds_function[function_bounds] is None: return None return self.dictionary_bounds_function[function_bounds](x) return self.function_undefined(x) def integral(self, start, end): if self.is_normalised and self.domain is not None: if (start < self.domain[0] or almost_equals(start, self.domain[0])) and ( end > self.domain[-1] or almost_equals(end, self.domain[-1])): return 1.0 if start >= end: return 0 result = 0 for function_bounds in self.dictionary_bounds_function: (a, b) = function_bounds if a <= start: if b >= end: return self.dictionary_bounds_function[function_bounds].integral(start, end) not_ordered = { (start, 0): 's', (end, 0): 'e', (a, 1): 'a', (b, 1): 'b' } order = ''.join([not_ordered[i] for i in sorted(not_ordered)]) if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe': continue if order in 'seab abse': continue if order == 'saeb': b = end elif order == 'asbe': a = start result += self.dictionary_bounds_function[function_bounds].integral(a, b) return result def find_bounds_for(self, point): for bounds in self.dictionary_bounds_function: (a, b) = bounds if a <= point and b >= point: return bounds def derivative(self, point): return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point) def function_in_point(self, point): for bounds in self.dictionary_bounds_function: a, b = bounds if a <= point <= b: return self.dictionary_bounds_function[bounds] return None # def functions_in_interval(self, interval_start, interval_end): # dictionary_bounds_function = {} # for bounds in self.dictionary_bounds_function: # a, b = bounds # if (interval_start < a or almost_equals(interval_start, a)) and ( # # ): @property def dictionary_bounds_function(self): return self._dictionary_bounds_function 
class FunctionPiecewiseLinear(FunctionComposite): def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False): self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output) dictionary_bounds_function = {} for i in xrange(1, len(self.input_list)): x_0, x_1 = self.input_list[i - 1], self.input_list[i] y_0, y_1 = self.output_list[i - 1], self.output_list[i] dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1) if NEGATIVE_INFINITY not in self.input_list: dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined if POSITIVE_INFINITY not in self.input_list: dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined FunctionComposite.__init__(self, dictionary_bounds_function, function_undefined=function_undefined, domain=self.input_list, is_normalised=is_normalised) def normalised(self): area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY) if almost_equals(area, 0): area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY) dictionary_input_output = {} output_list = [y / area for y in self.output_list] for i in xrange(len(self.input_list)): dictionary_input_output[self.input_list[i]] = output_list[i] result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined) result.is_normalised = True return result def __and__(self, other): for bounds in self.dictionary_bounds_function: a, b = bounds linear_function = self.dictionary_bounds_function[bounds] if __name__ == '__main__': a = FunctionLinear(1, 0) b = FunctionLinear(-1, 1) print a.intersect(b)
agpl-3.0
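A short usage sketch for FunctionPiecewiseLinear from the module above; it assumes the opencog python/ directory is on sys.path so utility.functions and its helpers import cleanly, and it stays in Python 2 like the module itself.

from utility.functions import FunctionPiecewiseLinear, FUNCTION_ZERO

# triangle through (0, 0), (1, 2), (2, 0); zero outside the domain
f = FunctionPiecewiseLinear({0: 0.0, 1: 2.0, 2: 0.0},
                            function_undefined=FUNCTION_ZERO)

print f(0.5)            # 1.0 -- linear interpolation on [0, 1]
print f.integral(0, 2)  # 2.0 -- area of the triangle

g = f.normalised()      # rescales the outputs so the total area is 1.0
print g.integral(0, 2)  # 1.0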
cl4rke/scikit-learn
sklearn/svm/tests/test_sparse.py
95
12156
from nose.tools import assert_raises, assert_true, assert_false import numpy as np from scipy import sparse from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) from sklearn import datasets, svm, linear_model, base from sklearn.datasets import make_classification, load_digits, make_blobs from sklearn.svm.tests import test_svm from sklearn.utils import ConvergenceWarning from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.testing import assert_warns, assert_raise_message # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) X_sp = sparse.lil_matrix(X) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2 X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ], [0, 0, 2], [3, 3, 3]]) X2_sp = sparse.dok_matrix(X2) Y2 = [1, 2, 2, 2, 3] T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]]) true_result2 = [1, 2, 3] iris = datasets.load_iris() # permute rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # sparsify iris.data = sparse.csr_matrix(iris.data) def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test): dense_svm.fit(X_train.toarray(), y_train) if sparse.isspmatrix(X_test): X_test_dense = X_test.toarray() else: X_test_dense = X_test sparse_svm.fit(X_train, y_train) assert_true(sparse.issparse(sparse_svm.support_vectors_)) assert_true(sparse.issparse(sparse_svm.dual_coef_)) assert_array_almost_equal(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray()) assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray()) if dense_svm.kernel == "linear": assert_true(sparse.issparse(sparse_svm.coef_)) assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray()) assert_array_almost_equal(dense_svm.support_, sparse_svm.support_) assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test)) assert_array_almost_equal(dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test)) assert_array_almost_equal(dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test_dense)) assert_array_almost_equal(dense_svm.predict_proba(X_test_dense), sparse_svm.predict_proba(X_test), 4) msg = "cannot use sparse input in 'SVC' trained on dense data" if sparse.isspmatrix(X_test): assert_raise_message(ValueError, msg, dense_svm.predict, X_test) def test_svc(): """Check that sparse SVC gives the same result as SVC""" # many class dataset: X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0) X_blobs = sparse.csr_matrix(X_blobs) datasets = [[X_sp, Y, T], [X2_sp, Y2, T2], [X_blobs[:80], y_blobs[:80], X_blobs[80:]], [iris.data, iris.target, iris.data]] kernels = ["linear", "poly", "rbf", "sigmoid"] for dataset in datasets: for kernel in kernels: clf = svm.SVC(kernel=kernel, probability=True, random_state=0) sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0) check_svm_model_equal(clf, sp_clf, *dataset) def test_unsorted_indices(): # test that the result with sorted and unsorted indices in csr is the same # we use a subset of digits as iris, blobs or make_classification didn't # show the problem digits = load_digits() X, y = digits.data[:50], digits.target[:50] X_test = sparse.csr_matrix(digits.data[50:100]) X_sparse = sparse.csr_matrix(X) coef_dense = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X, y).coef_ sparse_svc = svm.SVC(kernel='linear', 
probability=True, random_state=0).fit(X_sparse, y) coef_sorted = sparse_svc.coef_ # make sure dense and sparse SVM give the same result assert_array_almost_equal(coef_dense, coef_sorted.toarray()) X_sparse_unsorted = X_sparse[np.arange(X.shape[0])] X_test_unsorted = X_test[np.arange(X_test.shape[0])] # make sure we scramble the indices assert_false(X_sparse_unsorted.has_sorted_indices) assert_false(X_test_unsorted.has_sorted_indices) unsorted_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse_unsorted, y) coef_unsorted = unsorted_svc.coef_ # make sure unsorted indices give same result assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray()) assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test)) def test_svc_with_custom_kernel(): kfunc = lambda x, y: safe_sparse_dot(x, y.T) clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y) clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y) assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) def test_svc_iris(): # Test the sparse SVC with the iris dataset for k in ('linear', 'poly', 'rbf'): sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target) clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target) assert_array_almost_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) if k == 'linear': assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray()) def test_sparse_decision_function(): #Test decision_function #Sanity check, test that decision_function implemented in python #returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target) dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) def test_error(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X_sp, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X_sp, Y2) clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict(T), true_result) def test_linearsvc(): # Similar to test_SVC clf = svm.LinearSVC(random_state=0).fit(X, Y) sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y) assert_true(sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp)) clf.fit(X2, Y2) sp_clf.fit(X2_sp, Y2) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) def test_linearsvc_iris(): # Test the sparse LinearSVC with the iris dataset sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target) assert_equal(clf.fit_intercept, 
sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) # check decision_function pred = np.argmax(sp_clf.decision_function(iris.data), 1) assert_array_almost_equal(pred, clf.predict(iris.data.toarray())) # sparsify the coefficients on both models and check that they still # produce the same results clf.sparsify() assert_array_equal(pred, clf.predict(iris.data)) sp_clf.sparsify() assert_array_equal(pred, sp_clf.predict(iris.data)) def test_weight(): # Test class weights X_, y_ = make_classification(n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0) X_ = sparse.csr_matrix(X_) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: 5}) clf.fit(X_[:180], y_[:180]) y_pred = clf.predict(X_[180:]) assert_true(np.sum(y_pred == y_[180:]) >= 11) def test_sample_weights(): # Test weights on individual samples clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict(X[2]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X_sp, Y, sample_weight=sample_weight) assert_array_equal(clf.predict(X[2]), [2.]) def test_sparse_liblinear_intercept_handling(): # Test that sparse liblinear honours intercept_scaling param test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC) def test_sparse_realdata(): # Test on a subset from the 20newsgroups dataset. # This catchs some bugs if input is not correctly converted into # sparse format or weights are not correctly initialized. data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069]) indices = np.array([6, 5, 35, 31]) indptr = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4]) X = sparse.csr_matrix((data, indices, indptr)) y = np.array( [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2., 0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2., 0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1., 3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2., 0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2., 3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1., 1., 3.]) clf = svm.SVC(kernel='linear').fit(X.toarray(), y) sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y) assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) def test_sparse_svc_clone_with_callable_kernel(): # Test that the "dense_fit" is called even though we use sparse input # meaning that everything works fine. 
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0) b = base.clone(a) b.fit(X_sp, Y) pred = b.predict(X_sp) b.predict_proba(X_sp) dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0) pred_dense = dense_svm.fit(X, Y).predict(X) assert_array_equal(pred_dense, pred) # b.decision_function(X_sp) # XXX : should be supported def test_timeout(): sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, sp.fit, X_sp, Y) def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2)
bsd-3-clause
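A compact sketch of the invariant most of these tests assert: fitting SVC on a scipy.sparse matrix yields the same coefficients and predictions as fitting on the dense array.

import numpy as np
from scipy import sparse
from sklearn import datasets, svm

iris = datasets.load_iris()
X_dense, y = iris.data, iris.target
X_sparse = sparse.csr_matrix(X_dense)

clf_dense = svm.SVC(kernel='linear').fit(X_dense, y)
clf_sparse = svm.SVC(kernel='linear').fit(X_sparse, y)

# coefficients and predictions should agree (sparse fit stores coef_ sparse)
assert np.allclose(clf_dense.coef_, clf_sparse.coef_.toarray())
assert np.allclose(clf_dense.predict(X_dense), clf_sparse.predict(X_sparse))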
hlin117/scikit-learn
examples/ensemble/plot_forest_iris.py
18
6190
""" ==================================================================== Plot the decision surfaces of ensembles of trees on the iris dataset ==================================================================== Plot the decision surfaces of forests of randomized trees trained on pairs of features of the iris dataset. This plot compares the decision surfaces learned by a decision tree classifier (first column), by a random forest classifier (second column), by an extra- trees classifier (third column) and by an AdaBoost classifier (fourth column). In the first row, the classifiers are built using the sepal width and the sepal length features only, on the second row using the petal length and sepal length only, and on the third row using the petal width and the petal length only. In descending order of quality, when trained (outside of this example) on all 4 features using 30 estimators and scored using 10 fold cross validation, we see:: ExtraTreesClassifier() # 0.95 score RandomForestClassifier() # 0.94 score AdaBoost(DecisionTree(max_depth=3)) # 0.94 score DecisionTree(max_depth=None) # 0.94 score Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but the average score does not improve). See the console's output for further details about each model. In this example you might try to: 1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and ``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the ``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier`` 2) vary ``n_estimators`` It is worth noting that RandomForests and ExtraTrees can be fitted in parallel on many cores as each tree is built independently of the others. AdaBoost's samples are built sequentially and so do not use multiple cores. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import clone from sklearn.datasets import load_iris from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier) from sklearn.externals.six.moves import xrange from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 n_estimators = 30 cmap = plt.cm.RdYlBu plot_step = 0.02 # fine step width for decision surface contours plot_step_coarser = 0.5 # step widths for coarse classifier guesses RANDOM_SEED = 13 # fix the seed on each iteration # Load data iris = load_iris() plot_idx = 1 models = [DecisionTreeClassifier(max_depth=None), RandomForestClassifier(n_estimators=n_estimators), ExtraTreesClassifier(n_estimators=n_estimators), AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators)] for pair in ([0, 1], [0, 2], [2, 3]): for model in models: # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(RANDOM_SEED) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train clf = clone(model) clf = model.fit(X, y) scores = clf.score(X, y) # Create a title for each column and the console by using str() and # slicing away useless parts of the string model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")] model_details = model_title if hasattr(model, "estimators_"): model_details += " with {} estimators".format(len(model.estimators_)) print( model_details + " with features", pair, "has a score of", scores ) plt.subplot(3, 4, plot_idx) if plot_idx <= len(models): # Add a title at the top of each column plt.title(model_title) # Now plot the decision boundary using a fine mesh as input to a # filled contour plot x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) # Plot either a single DecisionTreeClassifier or alpha blend the # decision surfaces of the ensemble of classifiers if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: # Choose alpha blend level with respect to the number of estimators # that are in use (noting that AdaBoost can use fewer estimators # than its maximum if it achieves a good enough fit early on) estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) # Build a coarser grid to plot a set of ensemble classifications # to show how these are different to what we see in the decision # surfaces. 
These points are regularly space and do not have a black outline xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser), np.arange(y_min, y_max, plot_step_coarser)) Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape) cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") # Plot the training points, these are clustered together and have a # black outline plt.scatter(X[:, 0], X[:, 1], c=y, cmap=ListedColormap(['r', 'y', 'b'])) plot_idx += 1 # move on to the next plot in sequence plt.suptitle("Classifiers on feature subsets of the Iris dataset") plt.axis("tight") plt.show()
bsd-3-clause
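A minimal sketch of the decision-surface recipe the example above applies to every model: evaluate the classifier on a fine mesh over two features and draw filled contours. Shown here for a single RandomForestClassifier; the feature pair and step size are illustrative.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

iris = load_iris()
X, y = iris.data[:, [0, 1]], iris.target   # sepal length / sepal width pair
clf = RandomForestClassifier(n_estimators=30, random_state=13).fit(X, y)

# evaluate the classifier on a fine mesh and draw filled contours
xx, yy = np.meshgrid(np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, 0.02),
                     np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, 0.02))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k')
plt.show()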
natj/bender
paper/figs/fig9.py
1
4141
import numpy as np import math from pylab import * from palettable.wesanderson import Zissou_5 as wsZ import matplotlib.ticker as mtick from scipy.interpolate import interp1d from scipy.interpolate import griddata from scipy.signal import savgol_filter def smooth(xx, yy): yy = savgol_filter(yy, 7, 2) np.clip(yy, 0.0, 1000.0, out=yy) yy[0] = 0.0 yy[-1] = 0.0 return xx, yy #Read JN files def read_lineprof(fname): da = np.genfromtxt(fname, delimiter=",") des = np.diff(da[:,0])[2] norm = np.sum(des*da[:,1]) return da[:,0],da[:,1]/norm #Read JN files def read_csv(fname): da = np.genfromtxt(fname, delimiter=",") des = np.diff(da[:,0])[2] norm = np.sum(des*da[:,1]) return da[:,0],da[:,1] #/norm ## Plot fig = figure(figsize=(5,3), dpi=80) rc('font', family='serif') rc('xtick', labelsize='xx-small') rc('ytick', labelsize='xx-small') gs = GridSpec(1, 1) #gs.update(wspace = 0.34) #gs.update(hspace = 0.4) lsize = 10.0 xmin = 0.69 xmax = 0.82 #error window limits eymin = -0.5 eymax = 0.5 #path to files #path_JN = "../../out3/lines/" path_JN = "../../out/lines2/" #labels size tsize = 10.0 nu = '700' #fig.text(0.5, 0.92, '$\\theta_s = 18^{\\circ}$', ha='center', va='center', size=tsize) #fig.text(0.5, 0.72, '$\\theta_s = 45^{\\circ}$', ha='center', va='center', size=tsize) #fig.text(0.5, 0.52, '$\\theta_s = 90^{\\circ}$', ha='center', va='center', size=tsize) #fig.text(0.5, 0.32, 'Hopf $\\theta_s = 45^{\circ}$', ha='center', va='center', size=tsize) #fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize) ax1 = subplot(gs[0,0]) ax1.minorticks_on() ax1.set_xlim(xmin, xmax) ax1.set_ylim(0.0, 30) ax1.set_ylabel('Normalized flux',size=lsize) ax1.set_xlabel('Energy $E/E\'$',size=lsize) #xx1, yy1 = read_lineprof(path_JN+'lineprof_f700pbbr10m1.4i20.csv') #ax1.plot(xx1, yy1, "k--") #xx2, yy2 = read_lineprof(path_JN+'lineprof_obl_HTq0_f700pbbr10m1.4i20.csv') #ax1.plot(xx2, yy2, "k-") #lineprof_obl_HTq3_f700pbbr10m1.4i20.csv #lineprof_obl_HTq5_f700pbbr10m1.4i20.csv #lineprof_obl_HTq2_f700pbbr10m1.4i20.csv files_JN = [ "lineprof_f700pbbr10m1.4i20.csv", "lineprof_obl_f700pbbr10m1.4i20.csv", #"lineprof_sph2_HTqfix_f700pbbr10m1.4i20.csv"] #"lineprof_obl_HTq0_f700pbbr10m1.4i20.csv", "lineprof_obl_HTq1_f700pbbr10m1.4i20.csv"] #"lineprof_obl_HTq4_f700pbbr10m1.4i20.csv"] files_JN = ['sch/lineprofile_f700_bb_r10_m1.4_i20.csv', 'obl/lineprofile_f700_bb_r10_m1.4_i20.csv', 'q/lineprofile_f700_bb_r10_m1.4_i20.csv'] cols = ["black", "blue", "red", "magenta"] i = 0 for file_name in files_JN: xx, yy = read_lineprof(path_JN+file_name) xx, yy = smooth(xx, yy) ax1.plot(xx, yy, color=cols[i], linestyle="solid") i += 1 #path_JN = "../../out3/lines/" xx, yy = read_lineprof("../../out3/lines/lineprof_obl_HTq4_f700pbbr10m1.4i20.csv") ax1.plot(xx, yy, color="red", linestyle="dashed") #files_Bau = [ #"sch+dopp.csv", #"sch+dopp+obl.csv", #"HT.csv", #"HT_obl.csv"] files_Bau = ['sch.csv', 'obl.csv', 'ht.csv'] i = 0 for file_name in files_Bau: xx, yy = read_csv(path_JN+file_name) #rescale xx for correct scaling #xx = (xx-0.72)/(0.89-0.72)*(0.8-0.72) + 0.72 #ax1.plot(xx, yy, color=cols[i], linestyle="dashed") i += 1 ############ q's #xx3, yy3 = read_lineprof(path_JN+'lineprof_obl_HTq1_f700pbbr10m1.4i20.csv') #ax1.plot(xx3, yy3, "k-", label="$q = -0.268$") # #xx4, yy4 = read_lineprof(path_JN+'lineprof_obl_HTq2_f700pbbr10m1.4i20.csv') #ax1.plot(xx4, yy4, "r-", label="$q \\times 2$") # #xx5, yy5 = read_lineprof(path_JN+'lineprof_obl_HTq3_f700pbbr10m1.4i20.csv') #ax1.plot(xx5, yy5, "g-", label="$q \\times 3$") # #xx6, yy6 = 
read_lineprof(path_JN+'lineprof_obl_HTq4_f700pbbr10m1.4i20.csv') #ax1.plot(xx6, yy6, "b-", label="$q \\times 4$") # #xx7, yy7 = read_lineprof(path_JN+'lineprof_obl_HTq5_f700pbbr10m1.4i20.csv') #ax1.plot(xx7, yy7, "m-", label="$q \\times 5$") # #legend = ax1.legend(loc='upper left', shadow=False, labelspacing=0.1) #for label in legend.get_texts(): # label.set_fontsize('x-small') savefig('fig9_testi.pdf', bbox_inches='tight')
mit
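A tiny sketch of the smooth()/read_lineprof() preprocessing used in the figure script above, applied to synthetic data instead of the CSV line profiles (the Gaussian profile and noise level are made up for illustration).

import numpy as np
from scipy.signal import savgol_filter

xx = np.linspace(0.69, 0.82, 50)
yy = np.exp(-0.5 * ((xx - 0.755) / 0.01) ** 2) + 0.05 * np.random.rand(50)

yy = savgol_filter(yy, 7, 2)       # same window length / poly order as smooth()
np.clip(yy, 0.0, 1000.0, out=yy)

des = np.diff(xx)[2]               # bin width, as in read_lineprof()
yy = yy / np.sum(des * yy)         # normalise the profile to unit area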
ralbayaty/KaggleRetina
testing/censureHistCalc.py
1
4517
from skimage.feature import CENSURE from skimage.color import rgb2gray import matplotlib.pyplot as plt import numpy as np import cv2 import sys from PIL import Image, ImageDraw def draw_keypoints(img, kp, scale): draw = ImageDraw.Draw(img) # Draw a maximum of 300 keypoints for i in range(min(len(scale),300)): x1 = kp[i,1] y1 = kp[i,0] x2 = kp[i,1]+2**scale[i] y2 = kp[i,0]+2**scale[i] coords = (x1, y1, x2, y2) draw.ellipse(coords, fill = None, outline ='white') if __name__ == '__main__': try: file_name = sys.argv[1] except: print("Didn't give me a file...") file_name = "Lenna.png" def nothing(*arg): pass # Create sliderbars to change the values of CENSURE parameters online # Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10 cv2.namedWindow('censure') cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing) cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing) cv2.createTrackbar('mode', 'censure', 2, 2, nothing) cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing) cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing) # Read image from file, then inspect the image dimensions img = cv2.imread(file_name,1) height, width, channels = img.shape # Pull the different color channels from the image blue = img[:,:,0] green = img[:,:,1] red = img[:,:,2] gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Make a PIL image from each channel so we can use PIL.Iamge.thumbnail to resize if needed blue1 = Image.fromarray(blue) green1 = Image.fromarray(green) red1 = Image.fromarray(red) gray1 = Image.fromarray(gray) # Check if dimensions are above desired, if so then resize keepig aspect ratio m, n = 512, 512 if height > m or width > n: blue1.thumbnail((m,n), Image.ANTIALIAS) green1.thumbnail((m,n), Image.ANTIALIAS) red1.thumbnail((m,n), Image.ANTIALIAS) gray1.thumbnail((m,n), Image.ANTIALIAS) # CENSURE related mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"} last_num_kp = 0 while True: vis = gray.copy() img = img1.copy() # Read the values of the sliderbars and save them to variables min_scale = cv2.getTrackbarPos('min_scale', 'censure') max_scale = cv2.getTrackbarPos('max_scale', 'censure') if min_scale is 0: min_scale = 1 if min_scale + max_scale < 3: max_scale = min_scale + 2 mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))] non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000 line_threshold = cv2.getTrackbarPos('line_threshold', 'censure') # Create a CENSURE feature detector censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode, non_max_threshold=non_max_threshold, line_threshold=line_threshold) # Obtain the CENSURE features censure.detect(blue1) kp_blue, scale_blue = censure.keypoints, censure.scales censure.detect(green1) kp_green, scale_green = censure.keypoints, censure.scales censure.detect(red1) kp_red, scale_red = censure.keypoints, censure.scales censure.detect(gray1) kp_gray, scale_gray = censure.keypoints, censure.scales # Print the # of features if it has changed between iterations num_kp = len(censure.keypoints) if last_num_kp != num_kp: print("Number of keypoints: " + str(len(censure.keypoints))) last_num_kp = num_kp # Draw the feature points on the images draw_keypoints(blue1, kp_blue, scale_blue) draw_keypoints(green1, kp_green, scale_green) draw_keypoints(red1, kp_red, scale_red) draw_keypoints(gray1, kp_gray, scale_gray) # Obtain the histogram of scale values plt.clf() # clear the figure from any previous plot scale_hist, bin_edges = 
np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1)) plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1) plt.show(block=False) plt.draw() # Show the image with keypoints drawn over image = cv2.cvtColor(np.asarray(img),cv2.COLOR_BGR2RGB) cv2.imshow('censure', image) if 0xFF & cv2.waitKey(500) == 27: break cv2.destroyAllWindows()
gpl-2.0
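A stripped-down sketch of the CENSURE detection step that the interactive script above drives with OpenCV trackbars, using skimage's bundled astronaut image so it runs without a file argument. The parameter values mirror the script's initial trackbar positions and are starting points, not tuned settings.

from skimage.color import rgb2gray
from skimage.data import astronaut
from skimage.feature import CENSURE

gray = rgb2gray(astronaut())

# mode 'STAR' and non_max_threshold 6/1000 follow the script's defaults
detector = CENSURE(min_scale=1, max_scale=7, mode='STAR',
                   non_max_threshold=0.006, line_threshold=10)
detector.detect(gray)

print(detector.keypoints.shape)  # (n_keypoints, 2) array of (row, col)
print(detector.scales[:10])      # scale index per keypoint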
gunan/tensorflow
tensorflow/python/keras/engine/data_adapter_test.py
1
43158
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DataAdapter tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.utils import data_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test from tensorflow.python.util import nest class DummyArrayLike(object): """Dummy array-like object.""" def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] @property def shape(self): return self.data.shape @property def dtype(self): return self.data.dtype def fail_on_convert(x, **kwargs): _ = x _ = kwargs raise TypeError('Cannot convert DummyArrayLike to a tensor') ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert) class DataAdapterTestBase(keras_parameterized.TestCase): def setUp(self): super(DataAdapterTestBase, self).setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = constant_op.constant(2.0, shape=(50, 10)) self.tensor_target = array_ops.ones((50,)) self.arraylike_input = DummyArrayLike(self.numpy_input) self.arraylike_target = DummyArrayLike(self.numpy_target) self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices( (self.numpy_input, self.numpy_target)).shuffle(50).batch( self.batch_size) def generator(): while True: yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size)) self.generator_input = generator() self.iterator_input = data_utils.threadsafe_generator(generator)() self.sequence_input = TestSequence(batch_size=self.batch_size, feature_shape=10) self.model = keras.models.Sequential( [keras.layers.Dense(8, input_shape=(10,), activation='softmax')]) class TestSequence(data_utils.Sequence): def __init__(self, batch_size, feature_shape): self.batch_size = batch_size self.feature_shape = feature_shape def __getitem__(self, item): return (np.zeros((self.batch_size, self.feature_shape)), np.ones((self.batch_size,))) def __len__(self): return 10 class TensorLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(TensorLikeDataAdapterTest, self).setUp() self.adapter_cls = 
data_adapter.TensorLikeDataAdapter def test_can_handle_numpy(self): self.assertTrue(self.adapter_cls.can_handle(self.numpy_input)) self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_batch_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=4) self.assertEqual(adapter.get_size(), 13) # 50/4 self.assertTrue(adapter.has_partial_batch()) self.assertEqual(adapter.partial_batch_size(), 2) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.numpy_input, self.numpy_target, batch_size=5) def test_can_handle_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input))) self.assertTrue( self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0])) self.assertTrue( self.adapter_cls.can_handle( pd.DataFrame(self.numpy_input), pd.DataFrame(self.numpy_input)[0])) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') input_a = keras.Input(shape=(3,), name='input_a') input_b = keras.Input(shape=(3,), name='input_b') input_c = keras.Input(shape=(1,), name='input_b') x = keras.layers.Dense(4, name='dense_1')(input_a) y = keras.layers.Dense(3, name='dense_2')(input_b) z = keras.layers.Dense(1, name='dense_3')(input_c) model_1 = keras.Model(inputs=input_a, outputs=x) model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y]) model_3 = keras.Model(inputs=input_c, outputs=z) model_1.compile(optimizer='rmsprop', loss='mse') model_2.compile(optimizer='rmsprop', loss='mse') input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) input_a_df = pd.DataFrame(input_a_np) input_b_df = pd.DataFrame(input_b_np) output_a_df = pd.DataFrame(np.random.random((10, 4))) output_b_df = pd.DataFrame(np.random.random((10, 3))) model_1.fit(input_a_df, output_a_df) model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df]) model_1.fit([input_a_df], [output_a_df]) model_1.fit({'input_a': input_a_df}, output_a_df) model_2.fit({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) model_1.evaluate(input_a_df, output_a_df) model_2.evaluate([input_a_df, input_b_df], [output_a_df, 
output_b_df]) model_1.evaluate([input_a_df], [output_a_df]) model_1.evaluate({'input_a': input_a_df}, output_a_df) model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) # Verify predicting on pandas vs numpy returns the same result predict_1_pandas = model_1.predict(input_a_df) predict_2_pandas = model_2.predict([input_a_df, input_b_df]) predict_3_pandas = model_3.predict(input_a_df[0]) predict_1_numpy = model_1.predict(input_a_np) predict_2_numpy = model_2.predict([input_a_np, input_b_np]) predict_3_numpy = model_3.predict(np.asarray(input_a_df[0])) self.assertAllClose(predict_1_numpy, predict_1_pandas) self.assertAllClose(predict_2_numpy, predict_2_pandas) self.assertAllClose(predict_3_numpy, predict_3_pandas) # Extra ways to pass in dataframes model_1.predict([input_a_df]) model_1.predict({'input_a': input_a_df}) model_2.predict({'input_a': input_a_df, 'input_b': input_b_df}) def test_can_handle(self): self.assertTrue(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input)) self.assertFalse( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.tensor_input, self.tensor_target, batch_size=5) def test_size(self): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. 
epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, and that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch remains contiguous for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class GenericArrayLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GenericArrayLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter def test_can_handle_some_numpy(self): self.assertTrue(self.adapter_cls.can_handle( self.arraylike_input)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) # Because adapters are mutually exclusive, don't handle cases # where all the data is numpy or an eagertensor self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) # But do handle mixes that include generic arraylike data self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.arraylike_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.numpy_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.tensor_target)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) 
self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_size(self): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.arraylike_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): # First verify that DummyArrayLike can't be converted to a Tensor with self.assertRaises(TypeError): ops.convert_to_tensor_v2(self.arraylike_input) # Then train on the array like. # It should not be converted to a tensor directly (which would force it into # memory), only the sliced data should be converted. self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.predict(self.arraylike_input, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.numpy_target, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.numpy_target, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_tensor_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.tensor_target, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.tensor_target, batch_size=5) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. 
self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, but that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch is shuffled contiguous data for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class DatasetAdapterTest(DataAdapterTestBase): def setUp(self): super(DatasetAdapterTest, self).setUp() self.adapter_cls = data_adapter.DatasetAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): dataset = self.adapter_cls(self.dataset_input).get_dataset() self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(dataset) def test_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.batch_size()) def test_partial_batch(self): adapter = self.adapter_cls(self.dataset_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.dataset_input, y=self.dataset_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input) class GeneratorDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GeneratorDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GeneratorDataAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertTrue(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.generator_input, steps_per_epoch=10) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool 
def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.generator_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.generator_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.generator_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.generator_input, y=self.generator_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls( self.generator_input, sample_weights=self.generator_input) def test_not_shuffled(self): def generator(): for i in range(10): yield np.ones((1, 1)) * i adapter = self.adapter_cls(generator(), shuffle=True) with context.eager_mode(): for i, data in enumerate(adapter.get_dataset()): self.assertEqual(i, data[0].numpy().flatten()) class KerasSequenceAdapterTest(DataAdapterTestBase): def setUp(self): super(KerasSequenceAdapterTest, self).setUp() self.adapter_cls = data_adapter.KerasSequenceAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertTrue(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. 
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.get_size(), 10) def test_batch_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.sequence_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.sequence_input, y=self.sequence_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input) class DataHandlerTest(keras_parameterized.TestCase): def test_finite_dataset_with_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # User can choose to only partially consume `Dataset`. data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertEqual(data_handler.inferred_steps, 2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) def test_finite_dataset_without_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1) data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, 3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_finite_dataset_with_steps_per_epoch_exact_size(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # If user specifies exact size of `Dataset` as `steps_per_epoch`, # create a new iterator each epoch. 
data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=4) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) def test_infinite_dataset_with_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat() data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_unknown_cardinality_dataset_with_steps_per_epoch(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN) # User can choose to only partially consume `Dataset`. data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) self.assertEqual(data_handler.inferred_steps, 2) def test_unknown_cardinality_dataset_without_steps_per_epoch(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN) data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, None) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] with data_handler.catch_stop_iteration(): for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) self.assertEqual(data_handler.inferred_steps, 4) def test_insufficient_data(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1]) ds = ds.filter(lambda *args, **kwargs: True) data_handler = data_adapter.DataHandler( ds, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): with data_handler.catch_stop_iteration(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertTrue(data_handler._insufficient_data) self.assertEqual(returned_data, [[0, 1]]) def test_numpy(self): x = np.array([0, 1, 2]) y = np.array([0, 2, 4]) sw = np.array([0, 4, 8]) data_handler = data_adapter.DataHandler( x=x, y=y, sample_weight=sw, batch_size=1, epochs=2) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) 
self.assertEqual(returned_data, [[(0, 0, 0), (1, 2, 4), (2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]]) def test_generator(self): def generator(): for _ in range(2): for step in range(3): yield (ops.convert_to_tensor_v2([step]),) data_handler = data_adapter.DataHandler( generator(), epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_composite_tensor(self): st = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1]) data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate( nest.map_structure(sparse_ops.sparse_tensor_to_dense, returned_data)) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_list_of_scalars(self): data_handler = data_adapter.DataHandler([[0], [1], [2]], epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_class_weight_user_errors(self): with self.assertRaisesRegexp(ValueError, 'to be a dict with keys'): data_adapter.DataHandler( x=[[0], [1], [2]], y=[[2], [1], [0]], batch_size=1, sample_weight=[[1.], [2.], [4.]], class_weight={ 0: 0.5, 1: 1., 3: 1.5 # Skips class `2`. }) with self.assertRaisesRegexp(ValueError, 'with a single output'): data_adapter.DataHandler( x=np.ones((10, 1)), y=[np.ones((10, 1)), np.zeros((10, 1))], batch_size=2, class_weight={ 0: 0.5, 1: 1., 2: 1.5 }) class TestValidationSplit(keras_parameterized.TestCase): @parameterized.named_parameters(('numpy_arrays', True), ('tensors', False)) def test_validation_split_shuffled(self, use_numpy): if use_numpy: x = np.array([0, 1, 2, 3, 4]) y = np.array([0, 2, 4, 6, 8]) sw = np.array([0, 4, 8, 12, 16]) else: x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4]) y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8]) sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16]) (train_x, train_y, train_sw), (val_x, val_y, val_sw) = ( data_adapter.train_validation_split((x, y, sw), validation_split=0.2)) self.assertEqual(int(train_x.shape[0]), 4) self.assertEqual(int(train_y.shape[0]), 4) self.assertEqual(int(train_sw.shape[0]), 4) for i in range(4): # Check that all arrays were shuffled in identical order. self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy()) self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy()) self.assertEqual(int(val_x.shape[0]), 1) self.assertEqual(int(val_y.shape[0]), 1) self.assertEqual(int(val_sw.shape[0]), 1) for i in range(1): # Check that all arrays were shuffled in identical order. self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy()) self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy()) # Check that arrays contain expected values. 
self.assertEqual( sorted(array_ops.concat([train_x, val_x], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(x).numpy().tolist())) self.assertEqual( sorted(array_ops.concat([train_y, val_y], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(y).numpy().tolist())) self.assertEqual( sorted(array_ops.concat([train_sw, val_sw], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(sw).numpy().tolist())) @parameterized.named_parameters(('numpy_arrays', True), ('tensors', False)) def test_validation_split_unshuffled(self, use_numpy): if use_numpy: x = np.array([0, 1, 2, 3, 4]) y = np.array([0, 2, 4, 6, 8]) sw = np.array([0, 4, 8, 12, 16]) else: x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4]) y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8]) sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16]) (train_x, train_y, train_sw), (val_x, val_y, val_sw) = ( data_adapter.train_validation_split((x, y, sw), validation_split=0.2, shuffle=False)) self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3]) self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6]) self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12]) self.assertEqual(val_x.numpy().tolist(), [4]) self.assertEqual(val_y.numpy().tolist(), [8]) self.assertEqual(val_sw.numpy().tolist(), [16]) def test_validation_split_user_error(self): with self.assertRaisesRegexp(ValueError, 'is only supported for Tensors'): data_adapter.train_validation_split( lambda: np.ones((10, 1)), validation_split=0.2) def test_validation_split_examples_too_few(self): with self.assertRaisesRegexp( ValueError, 'not sufficient to split it'): data_adapter.train_validation_split( np.ones((1, 10)), validation_split=0.2) def test_validation_split_none(self): train_sw, val_sw = data_adapter.train_validation_split( None, validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) (_, train_sw), (_, val_sw) = data_adapter.train_validation_split( (np.ones((10, 1)), None), validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) class TestUtils(keras_parameterized.TestCase): def test_expand_1d_sparse_tensors_untouched(self): st = sparse_tensor.SparseTensor( indices=[[0], [10]], values=[1, 2], dense_shape=[10]) st = data_adapter.expand_1d(st) self.assertEqual(st.shape.rank, 1) if __name__ == '__main__': ops.enable_eager_execution() test.main()
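# --- Illustrative sketch (not part of the original test file) ---------------
# The GenericArrayLikeDataAdapter tests above slice a `DummyArrayLike` object
# (defined earlier in this file, outside this excerpt) instead of converting
# it to a tensor up front, so only each sliced batch is materialized. As a
# hedged sketch, the minimal interface those tests exercise looks roughly like
# the class below; the name and details are assumptions for illustration, not
# the actual DummyArrayLike definition.
class _MinimalArrayLikeSketch(object):
  """Hypothetical array-like exposing only what the adapter tests rely on."""

  def __init__(self, data):
    self.data = data

  def __len__(self):
    return len(self.data)

  def __getitem__(self, key):
    # Only the requested slice is turned into concrete (numpy) data.
    return self.data[key]

  @property
  def shape(self):
    return self.data.shape

  @property
  def dtype(self):
    return self.data.dtype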
apache-2.0
sonalranjit/SECS
SECS_trace.py
2
1609
__author__ = 'sonal'
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
from math import *

def polar_plot(grid, title):
    #z = grid[:,8]
    u = grid[:,8]
    v = grid[:,9]

    plt.figure(figsize=(18,18))
    ax = plt.gca()
    #m = Basemap(projection='npaeqd',boundinglat=20,lon_0=-100.,resolution='l')
    m = Basemap(width=8000000, height=8000000, resolution='l', projection='lcc',\
        lat_0=60,lon_0=-100.)
    m.drawcoastlines()
    m.drawparallels(np.arange(-80.,81,20.),labels=[1,0,0,0],fontsize=10)
    m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10)
    x,y =m(grid[:,7],grid[:,6])
    sc = m.scatter(x,y,s=abs(u),c=u,marker=',',cmap=cm.jet,alpha=0.9,edgecolors='none')
    plt.title(title)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cb1 = plt.colorbar(sc,cax=cax)
    cb1.set_label("mA/m",fontsize=18)
    plt.savefig('GOCE_asc_EICSu_krigged_201104.png',bbox_inches='tight',pad_inches=0.2)
    #plt.show()

def asc_desc(data):
    asc = []
    desc = []
    lat = data[:,6]
    for i in range(0,len(data)-1):
        if lat[i+1] >= lat[i]:
            asc.append(i)
        else:
            desc.append(i)
    return asc, desc

SECS_data = np.loadtxt('EICS_201103_krigged.txt')
asc_idx, desc_idx= asc_desc(SECS_data)

asc_track = SECS_data[asc_idx,:]
desc_track = SECS_data[desc_idx,:]

polar_plot(asc_track,'GOCE Ascending EICS u component Krigged April, 2011')
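# --- Illustrative sketch (not part of the original script) ------------------
# Hedged example: `desc_track` is computed above but never plotted. Because
# polar_plot() hard-codes its savefig() filename, a thin wrapper that re-saves
# the current figure under a caller-supplied name is sketched below; the
# wrapper name, the descending-track title and the output filename are
# assumptions for illustration only.
def polar_plot_named(grid, title, fname):
    """Reuse polar_plot() but also save the figure under `fname`."""
    polar_plot(grid, title)  # draws and saves the hard-coded ascending filename
    plt.savefig(fname, bbox_inches='tight', pad_inches=0.2)  # re-save current figure

# Example (commented out so the original script's behaviour is unchanged):
# polar_plot_named(desc_track,
#                  'GOCE Descending EICS u component Krigged April, 2011',
#                  'GOCE_desc_EICSu_krigged_201104.png')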
gpl-2.0
apaloczy/ap_tools
utils.py
1
54151
# Description: General-purpose functions for personal use. # Author: André Palóczy # E-mail: paloczy@gmail.com __all__ = ['seasonal_avg', 'seasonal_std', 'deseason', 'blkavg', 'blkavgdir', 'blkavgt', 'blkapply', 'stripmsk', 'pydatetime2m_arr', 'm2pydatetime_arr', 'npdt2dt', 'dt2sfloat', 'doy2date', 'flowfun', 'cumsimp', 'rot_vec', 'avgdir', 'lon180to360', 'lon360to180', 'bbox2ij', 'xy2dist', 'get_xtrackline', 'get_arrdepth', 'fpointsbox', 'near', 'near2', 'mnear', 'refine', 'denan', 'standardize', 'linear_trend', 'thomas', 'point_in_poly', 'get_mask_from_poly', 'sphericalpolygon_area', 'greatCircleBearing', 'weim', 'smoo2', 'topo_slope', 'curvature_geometric', 'get_isobath', 'angle_isobath', 'isopyc_depth', 'whiten_zero', 'wind2stress', 'gen_dates', 'fmt_isobath', 'float2latex', 'mat2npz', 'bb_map', 'dots_dualcolor'] from os import system import numpy as np import matplotlib.pyplot as plt import matplotlib from matplotlib import path from mpl_toolkits.basemap import Basemap from datetime import datetime, timedelta from dateutil import rrule, parser from scipy.io import loadmat, savemat from scipy import signal from scipy.signal import savgol_filter from glob import glob from netCDF4 import Dataset, num2date, date2num # from pandas import rolling_window # FIXME, new pandas way of doing this is, e.g., arr = Series(...).rolling(...).mean() from pandas import Timestamp from gsw import distance from pygeodesy import Datums, VincentyError from pygeodesy.ellipsoidalVincenty import LatLon as LatLon from pygeodesy.sphericalNvector import LatLon as LatLon_sphere def seasonal_avg(t, F): """ USAGE ----- F_seasonal = seasonal_avg(t, F) Calculates the seasonal average of variable F(t). Assumes 't' is a 'datetime.datetime' object. """ tmo = np.array([ti.month for ti in t]) ftmo = [tmo==mo for mo in range(1, 13)] return np.array([F[ft].mean() for ft in ftmo]) def seasonal_std(t, F): """ USAGE ----- F_seasonal = seasonal_std(t, F) Calculates the seasonal standard deviation of variable F(t). Assumes 't' is a 'datetime.datetime' object. """ tmo = np.array([ti.month for ti in t]) ftmo = [tmo==mo for mo in range(1, 13)] return np.array([F[ft].std() for ft in ftmo]) def deseason(t, F): """ USAGE ----- F_nonssn = deseason(t, F) Removes the seasonal signal of variable F(t). Assumes 't' is a 'datetime.datetime' object. Also assumes that F is sampled monthly and only for complete years (i.e., t.size is a multiple of 12). """ Fssn = seasonal_avg(t, F) nyears = int(t.size/12) aux = np.array([]) for n in range(nyears): aux = np.concatenate((aux, Fssn)) return F - aux def blkavg(x, y, every=2): """ Block-averages a variable y(x). Returns its block average and standard deviation and new x axis. """ nx = x.size xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([]) for i in range(every, nx+every, every): yi = y[i-every:i] xblk = np.append(xblk, np.nanmean(x[i-every:i])) yblk = np.append(yblk, np.nanmean(yi)) yblkstd = np.append(yblkstd, np.nanstd(yi)) return xblk, yblk, yblkstd def blkavgdir(x, ydir, every=2, degrees=False, axis=None): """ Block-averages a PERIODIC variable ydir(x). Returns its block average and new x axis. """ nx = x.size xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([]) for i in range(every, nx+every, every): xblk = np.append(xblk, np.nanmean(x[i-every:i])) yblk = np.append(yblk, avgdir(ydir[i-every:i], degrees=degrees, axis=axis)) return xblk, yblk def blkavgt(t, x, every=2): """ Block-averages a variable x(t). Returns its block average and the new t axis. 
""" nt = t.size units = 'days since 01-01-01' calendar = 'proleptic_gregorian' t = date2num(t, units=units, calendar=calendar) tblk, xblk = np.array([]), np.array([]) for i in range(every, nt+every, every): xi = x[i-every:i] tblk = np.append(tblk, np.nanmean(t[i-every:i])) xblk = np.append(xblk, np.nanmean(xi)) tblk = num2date(tblk, units=units, calendar=calendar) return tblk, xblk def blkapply(x, f, nblks, overlap=0, demean=False, detrend=False, verbose=True): """ Divides array 'x' in 'nblks' blocks and applies function 'f' = f(x) on each block. """ x = np.array(x) assert callable(f), "f must be a function" nx = x.size ni = int(nx/nblks) # Number of data points in each chunk. y = np.zeros(ni) # Array that will receive each block. dn = int(round(ni - overlap*ni)) # How many indices to move forward with # each chunk (depends on the % overlap). # Demean/detrend the full record first (removes the lowest frequencies). # Then, also demean/detrend each block beffore applying f(). if demean: x = x - x.mean() if detrend: x = signal.detrend(x, type='linear') n=0 il, ir = 0, ni while ir<=nx: xn = x[il:ir] if demean: xn = xn - xn.mean() if detrend: xn = signal.detrend(xn, type='linear') y = y + f(xn) # Apply function and accumulate the current bock. il+=dn; ir+=dn n+=1 y /= n # Divide by number of blocks actually used. ncap = nx - il # Number of points left out at the end of array. if verbose: print("") print("Left last %d data points out (%.1f %% of all points)."%(ncap,100*ncap/nx)) if overlap>0: print("") print("Intended %d blocks, but could fit %d blocks, with"%(nblks,n)) print('overlap of %.1f %%, %d points per block.'%(100*overlap,dn)) print("") return y def stripmsk(arr, mask_invalid=False): if mask_invalid: arr = np.ma.masked_invalid(arr) if np.ma.isMA(arr): msk = arr.mask arr = arr.data arr[msk] = np.nan return arr def pydatetime2m_arr(pydt_arr): pydt_arr = np.array(pydt_arr) secperyr = 86400.0 timedt = timedelta(days=366) matdt = [] for pydt in pydt_arr.tolist(): m = pydt.toordinal() + timedt dfrac = pydt - datetime(pydt.year,pydt.month,pydt.day,0,0,0).seconds/secperyr matdt.append(m.toordinal() + dfrac) return np.array(matdt) def m2pydatetime_arr(mdatenum_arr): mdatenum_arr = np.array(mdatenum_arr) timedt = timedelta(days=366) pydt = [] for mdt in mdatenum_arr.tolist(): d = datetime.fromordinal(int(mdt)) dfrac = timedelta(days=mdt%1) - timedt pydt.append(d + dfrac) return np.array(pydt) def npdt2dt(tnp): """ USAGE ----- t_datetime = npdt2dt(t_numpydatetime64) Convert an array of numpy.datetime64 timestamps to datetime.datetime. """ return np.array([Timestamp(ti).to_pydatetime() for ti in tnp]) def dt2sfloat(t): """ USAGE ----- t_float = dt2sfloat(t_datetime) Convert an array of datetime.datetime timestamps to an array of floats representing elapsed seconds since the first timestamp. """ t = np.array(t) t0 = t[0] return np.array([(tn - t0).total_seconds() for tn in t]) def doy2date(doy, year=2017): """ USAGE ----- t = doy2date(doy, year=2017) Convert an array `doy` of decimal yeardays to an array of datetime.datetime timestamps. """ doy = np.array(doy)*86400 # [seconds/day]. 
tunit = 'seconds since %d-01-01 00:00:00'%year return np.array([num2date(dn, tunit) for dn in doy]) def flowfun(x, y, u, v, variable='psi', geographic=True): """ FLOWFUN Computes the potential PHI and the streamfunction PSI of a 2-dimensional flow defined by the matrices of velocity components U and V, so that d(PHI) d(PSI) d(PHI) d(PSI) u = ----- - ----- , v = ----- + ----- dx dy dx dy P = FLOWFUN(x,y,u,v) returns an array P of the same size as u and v, which can be the velocity potential (PHI) or the streamfunction (PSI) Because these scalar fields are defined up to the integration constant, their absolute values are such that PHI[0,0] = PSI[0,0] = 0. For a potential (irrotational) flow PSI = 0, and the Laplacian of PSI is equal to the divergence of the velocity field. A solenoidal (non-divergent) flow can be described by the streamfunction alone, and the Laplacian of the streamfunction is equal to the vorticity (curl) of the velocity field. The units of the grid coordinates are assumed to be consistent with the units of the velocity components, e.g., [m] and [m/s]. If variable=='psi', the streamfunction (PSI) is returned. If variable=='phi', the velocity potential (PHI) is returned. If geographic==True (default), (x,y) are assumed to be (longitude,latitude) and are converted to meters before computing (dx,dy). If geographic==False, (x,y) are assumed to be in meters. Uses function 'cumsimp()' (Simpson rule summation). Author: Kirill K. Pankratov, March 7, 1994. Source: http://www-pord.ucsd.edu/~matlab/stream.htm Translated to Python by André Palóczy, January 15, 2015. Modified by André Palóczy on January 15, 2015. """ x,y,u,v = map(np.asanyarray, (x,y,u,v)) if not x.shape==y.shape==u.shape==v.shape: print("Error: Arrays (x, y, u, v) must be of equal shape.") return ## Calculating grid spacings. if geographic: dlat, _ = np.gradient(y) _, dlon = np.gradient(x) deg2m = 111120.0 # [m/deg] dx = dlon*deg2m*np.cos(y*np.pi/180.) # [m] dy = dlat*deg2m # [m] else: dy, _ = np.gradient(y) _, dx = np.gradient(x) ly, lx = x.shape # Shape of the (x,y,u,v) arrays. ## Now the main computations. ## Integrate velocity fields to get potential and streamfunction. ## Use Simpson rule summation (function CUMSIMP). ## Compute velocity potential PHI (non-rotating part). if variable=='phi': cx = cumsimp(u[0,:]*dx[0,:]) # Compute x-integration constant cy = cumsimp(v[:,0]*dy[:,0]) # Compute y-integration constant cx = np.expand_dims(cx, 0) cy = np.expand_dims(cy, 1) phiy = cumsimp(v*dy) + np.tile(cx, (ly,1)) phix = cumsimp(u.T*dx.T).T + np.tile(cy, (1,lx)) phi = (phix + phiy)/2. return phi ## Compute streamfunction PSI (non-divergent part). if variable=='psi': cx = cumsimp(v[0,:]*dx[0,:]) # Compute x-integration constant cy = cumsimp(u[:,0]*dy[:,0]) # Compute y-integration constant cx = np.expand_dims(cx, 0) cy = np.expand_dims(cy, 1) psix = -cumsimp(u*dy) + np.tile(cx, (ly,1)) psiy = cumsimp(v.T*dx.T).T - np.tile(cy, (1,lx)) psi = (psix + psiy)/2. return psi def cumsimp(y): """ F = CUMSIMP(Y) Simpson-rule column-wise cumulative summation. Numerical approximation of a function F(x) such that Y(X) = dF/dX. Each column of the input matrix Y represents the value of the integrand Y(X) at equally spaced points X = 0,1,...size(Y,1). The output is a matrix F of the same size as Y. The first row of F is equal to zero and each following row is the approximation of the integral of each column of matrix Y up to the givem row. CUMSIMP assumes continuity of each column of the function Y(X) and uses Simpson rule summation. 
Similar to the command F = CUMSUM(Y), exept for zero first row and more accurate summation (under the assumption of continuous integrand Y(X)). Author: Kirill K. Pankratov, March 7, 1994. Source: http://www-pord.ucsd.edu/~matlab/stream.htm Translated to Python by André Palóczy, January 15, 2015. """ y = np.asanyarray(y) ## 3-point interpolation coefficients to midpoints. ## Second-order polynomial (parabolic) interpolation coefficients ## from Xbasis = [0 1 2] to Xint = [.5 1.5] c1 = 3/8. c2 = 6/8. c3 = -1/8. if y.ndim==1: y = np.expand_dims(y,1) f = np.zeros((y.size,1)) # Initialize summation array. squeeze_after = True elif y.ndim==2: f = np.zeros(y.shape) # Initialize summation array. squeeze_after = False else: print("Error: Input array has more than 2 dimensions.") return if y.size==2: # If only 2 elements in columns - simple average. f[1,:] = (y[0,:] + y[1,:])/2. return f else: # If more than two elements in columns - Simpson summation. ## Interpolate values of y to all midpoints. f[1:-1,:] = c1*y[:-2,:] + c2*y[1:-1,:] + c3*y[2:,:] f[2:,:] = f[2:,:] + c3*y[:-2,:] + c2*y[1:-1,:] + c1*y[2:,:] f[1,:] = f[1,:]*2 f[-1,:] = f[-1,:]*2 ## Simpson (1,4,1) rule. f[1:,:] = 2*f[1:,:] + y[:-1,:] + y[1:,:] f = np.cumsum(f, axis=0)/6. # Cumulative sum, 6 - denominator from the Simpson rule. if squeeze_after: f = f.squeeze() return f def rot_vec(u, v, angle=-45, degrees=True): """ USAGE ----- u_rot,v_rot = rot_vec(u,v,angle=-45.,degrees=True) Returns the rotated vector components (`u_rot`,`v_rot`) from the zonal-meridional input vector components (`u`,`v`). The rotation is done using the angle `angle` positive counterclockwise (trigonometric convention). If `degrees` is set to `True``(default), then `angle` is converted to radians. is Example ------- >>> from matplotlib.pyplot import quiver >>> from ap_tools.utils import rot_vec >>> u = -1. >>> v = -1. >>> u2,v2 = rot_vec(u,v, angle=-30.) """ u,v = map(np.asanyarray, (u,v)) if degrees: angle = angle*np.pi/180. # Degrees to radians. u_rot = +u*np.cos(angle) + v*np.sin(angle) # Usually the across-shore component. v_rot = -u*np.sin(angle) + v*np.cos(angle) # Usually the along-shore component. return u_rot,v_rot def avgdir(dirs, degrees=False, axis=None): """ USAGE ----- dirm = avgdir(dirs, degrees=False, axis=None) Calculate the mean direction of an array of directions 'dirs'. If 'degrees' is 'False' (default), the input directions must be in radians. If 'degrees' is 'True', the input directions must be in degrees. The direction angle is measured from the ZONAL axis, i.e., (0, 90, -90) deg are (Eastward, Northward, Southward). 180 and -180 deg are both Westward. If 'axis' is 'None' (default) the mean is calculated on the flattened array. Otherwise, 'axis' is the index of the axis to calculate the mean over. """ dirs = np.array(dirs) if degrees: dirs = dirs*np.pi/180 # Degrees to radians. uxs = np.cos(dirs) vys = np.sin(dirs) dirm = np.arctan2(vys.sum(axis=axis), uxs.sum(axis=axis)) if degrees: dirm = dirm*180/np.pi # From radians to degrees. return dirm def lon180to360(lon): """ Converts longitude values in the range [-180,+180] to longitude values in the range [0,360]. """ lon = np.asanyarray(lon) return (lon + 360.0) % 360.0 def lon360to180(lon): """ Converts longitude values in the range [0,360] to longitude values in the range [-180,+180]. """ lon = np.asanyarray(lon) return ((lon + 180.) % 360.) - 180. 
def bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True): """ USAGE ----- ilon_start, ilon_end, jlat_start, jlat_end = bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True) OR (ilon_start_left, ilon_end_left, jlat_start, jlat_end), (ilon_start_right, ilon_end_right, jlat_start, jlat_end) = ... ... bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True) Return indices for i,j that will completely cover the specified bounding box. 'lon' and 'lat' are 2D coordinate arrays (generated by meshgrid), and 'bbox' is a list like [lon_start, lon_end, lat_start, lat_end] describing the desired longitude-latitude box. If the specified bbox is such that it crosses the edges of the longitude array, two tuples of indices are returned. The first (second) tuple traces out the left (right) part of the bbox. If FIX_IDL is set to 'True' (default), the indices returned correspond to the "short route" around the globe, which amounts to assuming that the specified bbox crosses the International Date. If FIX_IDL is set to 'False', the "long route" is used instead. Example ------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> lon = np.arange(-180., 180.25, 0.25) >>> lat = np.arange(-90., 90.25, 0.25) >>> lon, lat = np.meshgrid(lon, lat) >>> h = np.sin(lon) + np.cos(lat) >>> i0, i1, j0, j1 = bbox2ij(lon, lat, bbox=[-71, -63., 39., 46]) >>> h_subset = h[j0:j1,i0:i1] >>> lon_subset = lon[j0:j1,i0:i1] >>> lat_subset = lat[j0:j1,i0:i1] >>> fig, ax = plt.subplots() >>> ax.pcolor(lon_subset,lat_subset,h_subset) >>> plt.axis('tight') Original function downloaded from http://gis.stackexchange.com/questions/71630/subsetting-a-curvilinear-netcdf-file-roms-model-output-using-a-lon-lat-boundin Modified by André Palóczy on August 20, 2016 to handle bboxes that cross the International Date Line or the edges of the longitude array. """ lon, lat, bbox = map(np.asanyarray, (lon, lat, bbox)) # Test whether the wanted bbox crosses the International Date Line (brach cut of the longitude array). dlon = bbox[:2].ptp() IDL_BBOX=dlon>180. IDL_BBOX=np.logical_and(IDL_BBOX, FIX_IDL) mypath = np.array([bbox[[0,1,1,0]], bbox[[2,2,3,3]]]).T p = path.Path(mypath) points = np.vstack((lon.flatten(), lat.flatten())).T n, m = lon.shape inside = p.contains_points(points).reshape((n, m)) # Fix mask if bbox goes throught the International Date Line. if IDL_BBOX: fcol=np.all(~inside, axis=0) flin=np.any(inside, axis=1) fcol, flin = map(np.expand_dims, (fcol, flin), (0, 1)) fcol = np.tile(fcol, (n, 1)) flin = np.tile(flin, (1, m)) inside=np.logical_and(flin, fcol) print("Bbox crosses the International Date Line.") ii, jj = np.meshgrid(range(m), range(n)) iiin, jjin = ii[inside], jj[inside] i0, i1, j0, j1 = min(iiin), max(iiin), min(jjin), max(jjin) SPLIT_BBOX=(i1-i0)==(m-1) # Test whether the wanted bbox crosses edges of the longitude array. # If wanted bbox crosses edges of the longitude array, return indices for the two boxes separately. if SPLIT_BBOX: Iiin = np.unique(iiin) ib0 = np.diff(Iiin).argmax() # Find edge of the inner side of the left bbox. ib1 = ib0 + 1 # Find edge of the inner side of the right bbox. Il, Ir = Iiin[ib0], Iiin[ib1] # Indices of the columns that bound the inner side of the two bboxes. print("Bbox crosses edges of the longitude array. 
Returning two sets of indices.") return (i0, Il, j0, j1), (Ir, i1, j0, j1) else: return i0, i1, j0, j1 def xy2dist(x, y, cyclic=False, datum='WGS84'): """ USAGE ----- d = xy2dist(x, y, cyclic=False, datum='WGS84') Calculates a distance axis from a line defined by longitudes and latitudes 'x' and 'y', using either the Vicenty formulae on an ellipsoidal earth (ellipsoid defaults to WGS84) or on a sphere (if datum=='Sphere'). Example ------- >>> yi, yf = -23.550520, 32.71573800 >>> xi, xf = -46.633309, -117.161084 >>> x, y = np.linspace(xi, xf), np.linspace(yi, yf) >>> d_ellipse = xy2dist(x, y, datum='WGS84')[-1]*1e-3 # [km]. >>> d_sphere = xy2dist(x, y, datum='Sphere')[-1]*1e-3 # [km]. >>> dd = np.abs(d_ellipse - d_sphere) >>> dperc = 100*dd/d_ellipse >>> msg = 'Difference of %.1f km over a %.0f km-long line (%.3f %% difference)'%(dd, d_ellipse, dperc) >>> print(msg) """ if datum!="Sphere": xy = [LatLon(y0, x0, datum=Datums[datum]) for x0, y0 in zip(x, y)] else: xy = [LatLon_sphere(y0, x0) for x0, y0 in zip(x, y)] d = np.array([xy[n].distanceTo(xy[n+1]) for n in range(len(xy)-1)]) return np.append(0, np.cumsum(d)) def get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10): """ USAGE ----- lonp, latp = get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=13) Generates a great-circle line with length 2L (with L in km) that is perpendicular to the great-circle line defined by the input points (lon1, lat1) and (lon2, lat2). The spacing between the points along the output line is dL km. Assumes a spherical Earth. """ km2m = 1e3 L, dL = L*km2m, dL*km2m nh = int(L/dL) p1, p2 = LatLon_sphere(lat1, lon1), LatLon_sphere(lat2, lon2) angperp = p1.initialBearingTo(p2) + 90 angperpb = angperp + 180 pm = p1.midpointTo(p2) # Create perpendicular line starting from the midpoint. N = range(1, nh + 1) pperp = [] _ = [pperp.append(pm.destination(dL*n, angperpb)) for n in N] pperp.reverse() pperp.append(pm) _ = [pperp.append(pm.destination(dL*n, angperp)) for n in N] lonperp = np.array([p.lon for p in pperp]) latperp = np.array([p.lat for p in pperp]) return lonperp, latperp def get_arrdepth(arr): """ USAGE ----- arr_depths = get_arrdepth(arr) Determine number of nested levels in each element of an array of arrays of arrays... (or other array-like objects). """ arr = np.array(arr) # Make sure first level is an array. all_nlevs = [] for i in range(arr.size): nlev=0 wrk_arr = arr[i] while np.size(wrk_arr)>0: try: wrk_arr = np.array(wrk_arr[i]) except Exception: all_nlevs.append(nlev) nlev=0 break nlev+=1 return np.array(all_nlevs) def fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True): """ USAGE ----- fpts = fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True) Find points in a rectangle made with 2 ginput points. """ fpts = np.array([]) for n in range(nboxes): box = np.array(fig.ginput(n=2, timeout=0)) try: xb, yb = box[:,0], box[:,1] except IndexError: print("No points selected. 
Skipping box \# %d."%(n+1)) continue xl, xr, yd, yu = xb.min(), xb.max(), yb.min(), yb.max() xbox = np.array([xl, xr, xr, xl, xl]) ybox = np.array([yd, yd, yu, yu, yd]) fxbox, fybox = np.logical_and(x>xl, x<xr), np.logical_and(y>yd, y<yu) fptsi = np.logical_and(fxbox, fybox) if return_index: fptsi = np.where(fptsi)[0] fpts = np.append(fpts, fptsi) if plot: ax.plot(xbox, ybox, 'r', linestyle='solid', marker='o', ms=4) ax.plot(x[fptsi], y[fptsi], 'r', linestyle='none', marker='+', ms=5) plt.draw() fig.show() else: fig.close() if plot: plt.draw() fig.show() system("sleep %d"%pause_secs) return fpts def near(x, x0, npts=1, return_index=False): """ USAGE ----- xnear = near(x, x0, npts=1, return_index=False) Finds 'npts' points (defaults to 1) in array 'x' that are closest to a specified 'x0' point. If 'return_index' is True (defauts to False), then the indices of the closest points are returned. The indices are ordered in order of closeness. """ x = list(x) xnear = [] xidxs = [] for n in range(npts): idx = np.nanargmin(np.abs(np.array(x)-x0)) xnear.append(x.pop(idx)) if return_index: xidxs.append(idx) if return_index: # Sort indices according to the proximity of wanted points. xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()] xnear.sort() if npts==1: xnear = xnear[0] if return_index: xidxs = xidxs[0] else: xnear = np.array(xnear) if return_index: return xidxs else: return xnear def near2(x, y, x0, y0, npts=1, return_index=False): """ USAGE ----- xnear, ynear = near2(x, y, x0, y0, npts=1, return_index=False) Finds 'npts' points (defaults to 1) in arrays 'x' and 'y' that are closest to a specified '(x0, y0)' point. If 'return_index' is True (defauts to False), then the indices of the closest point(s) are returned. Example ------- >>> x = np.arange(0., 100., 0.25) >>> y = np.arange(0., 100., 0.25) >>> x, y = np.meshgrid(x, y) >>> x0, y0 = 44.1, 30.9 >>> xn, yn = near2(x, y, x0, y0, npts=1) >>> print("(x0, y0) = (%f, %f)"%(x0, y0)) >>> print("(xn, yn) = (%f, %f)"%(xn, yn)) """ x, y = map(np.array, (x, y)) shp = x.shape xynear = [] xyidxs = [] dx = x - x0 dy = y - y0 dr = dx**2 + dy**2 for n in range(npts): xyidx = np.unravel_index(np.nanargmin(dr), dims=shp) if return_index: xyidxs.append(xyidx) xyn = (x[xyidx], y[xyidx]) xynear.append(xyn) dr[xyidx] = np.nan if npts==1: xynear = xynear[0] if return_index: xyidxs = xyidxs[0] if return_index: return xyidxs else: return xynear def mnear(x, y, x0, y0): """ USAGE ----- xmin,ymin = mnear(x, y, x0, y0) Finds the the point in a (lons,lats) line that is closest to a specified (lon0,lat0) point. """ x,y,x0,y0 = map(np.asanyarray, (x,y,x0,y0)) point = (x0,y0) d = np.array([]) for n in range(x.size): xn,yn = x[n],y[n] dn = distance((xn,x0),(yn,y0)) # Calculate distance point-wise. d = np.append(d,dn) idx = d.argmin() return x[idx],y[idx] def refine(line, nref=100, close=True): """ USAGE ----- ref_line = refine(line, nref=100, close=True) Given a 1-D sequence of points 'line', returns a new sequence 'ref_line', which is built by linearly interpolating 'nref' points between each pair of subsequent points in the original line. If 'close' is True (default), the first value of the original line is repeated at the end of the refined line, as in a closed polygon. 
""" line = np.squeeze(np.asanyarray(line)) if close: line = np.append(line,line[0]) ref_line = np.array([]) for n in range(line.shape[0]-1): xi, xf = line[n], line[n+1] xref = np.linspace(xi,xf,nref) ref_line = np.append(ref_line, xref) return ref_line def point_in_poly(x,y,poly): """ USAGE ----- isinside = point_in_poly(x,y,poly) Determine if a point is inside a given polygon or not Polygon is a list of (x,y) pairs. This fuction returns True or False. The algorithm is called 'Ray Casting Method'. Source: http://pseentertainmentcorp.com/smf/index.php?topic=545.0 """ n = len(poly) inside = False p1x,p1y = poly[0] for i in range(n+1): p2x,p2y = poly[i % n] if y > min(p1y,p2y): if y <= max(p1y,p2y): if x <= max(p1x,p2x): if p1y != p2y: xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x if p1x == p2x or x <= xinters: inside = not inside p1x,p1y = p2x,p2y return inside def get_mask_from_poly(xp, yp, poly, verbose=False): """ USAGE ----- mask = get_mask_from_poly(xp, yp, poly, verbose=False) Given two arrays 'xp' and 'yp' of (x,y) coordinates (generated by meshgrid) and a polygon defined by an array of (x,y) coordinates 'poly', with shape = (n,2), return a boolean array 'mask', where points that lie inside 'poly' are set to 'True'. """ print('Building the polygon mask...') jmax, imax = xp.shape mask = np.zeros((jmax,imax)) for j in range(jmax): if verbose: print("Row %s of %s"%(j+1,jmax)) for i in range(imax): px, py = xp[j,i], yp[j,i] # Test if this point is within the polygon. mask[j,i] = point_in_poly(px, py, poly) return mask def sphericalpolygon_area(lons, lats, R=6371000.): """ USAGE ----- area = sphericalpolygon_area(lons, lats, R=6371000.) Calculates the area of a polygon on the surface of a sphere of radius R using Girard's Theorem, which states that the area of a polygon of great circles is R**2 times the sum of the angles between the polygons minus (N-2)*pi, where N is number of corners. R = 6371000 m (6371 km, default) is a typical value for the mean radius of the Earth. Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python """ lons, lats = map(np.asanyarray, (lons, lats)) N = lons.size angles = np.empty(N) for i in range(N): phiB1, phiA, phiB2 = np.roll(lats, i)[:3] LB1, LA, LB2 = np.roll(lons, i)[:3] # calculate angle with north (eastward) beta1 = greatCircleBearing(LA, phiA, LB1, phiB1) beta2 = greatCircleBearing(LA, phiA, LB2, phiB2) # calculate angle between the polygons and add to angle array angles[i] = np.arccos(np.cos(-beta1)*np.cos(-beta2) + np.sin(-beta1)*np.sin(-beta2)) return (np.sum(angles) - (N-2)*np.pi)*R**2 def greatCircleBearing(lon1, lat1, lon2, lat2): """ USAGE ----- angle = greatCircleBearing(lon1, lat1, lon2, lat2) Calculates the angle (positive eastward) a great circle passing through points (lon1,lat1) and (lon2,lat2) makes with true nirth. Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python """ lon1, lat1, lon2, lat2 = map(np.asanyarray, (lon1, lat1, lon2, lat2)) dLong = lon1 - lon2 d2r = np.pi/180. s = np.cos(d2r*lat2)*np.sin(d2r*dLong) c = np.cos(d2r*lat1)*np.sin(d2r*lat2) - np.sin(lat1*d2r)*np.cos(d2r*lat2)*np.cos(d2r*dLong) return np.arctan2(s, c) def weim(x, N, kind='hann', badflag=-9999, beta=14): """ Usage ----- xs = weim(x, N, kind='hann', badflag=-9999, beta=14) Description ----------- Calculates the smoothed array 'xs' from the original array 'x' using the specified window of type 'kind' and size 'N'. 
'N' must be an odd number. Parameters ---------- x : 1D array Array to be smoothed. N : integer Window size. Must be odd. kind : string, optional One of the window types available in the numpy module: hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed. hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is discontinuous at the edges, and may produce undesired artifacts. blackman : Similar to the hann and hamming windows, with sharper ends. bartlett : Triangular-like. Its end-points are zeroed. kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter. For beta=0, the window is rectangular. As beta increases, the window gets narrower. Refer to the numpy functions for details about each window type. badflag : float, optional The bad data flag. Elements of the input array 'A' holding this value are ignored. beta : float, optional Shape parameter for the kaiser window. For windows other than the kaiser window, this parameter does nothing. Returns ------- xs : 1D array The smoothed array. --------------------------------------- André Palóczy Filho (paloczy@gmail.com) June 2012 ============================================================================================================== """ ########################################### ### Checking window type and dimensions ### ########################################### kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser'] if ( kind not in kinds ): raise ValueError('Invalid window type requested: %s'%kind) if np.mod(N,2) == 0: raise ValueError('Window size must be odd') ########################### ### Creating the window ### ########################### if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required). wstr = 'np.kaiser(N, beta)' else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required). if kind == 'hann': kind = 'hanning' wstr = 'np.' + kind + '(N)' w = eval(wstr) x = np.asarray(x).flatten() Fnan = np.isnan(x).flatten() ln = (N-1)/2 lx = x.size lf = lx - ln xs = np.nan*np.ones(lx) # Eliminating bad data from mean computation. fbad=x==badflag x[fbad] = np.nan for i in range(lx): if i <= ln: xx = x[:ln+i+1] ww = w[ln-i:] elif i >= lf: xx = x[i-ln:] ww = w[:lf-i-1] else: xx = x[i-ln:i+ln+1] ww = w.copy() f = ~np.isnan(xx) # Counting only NON-NaNs, both in the input array and in the window points. xx = xx[f] ww = ww[f] if f.sum() == 0: # Thou shalt not divide by zero. xs[i] = x[i] else: xs[i] = np.sum(xx*ww)/np.sum(ww) xs[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array. return xs def smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14): """ Usage ----- As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14) Description ----------- Calculates the smoothed array 'As' from the original array 'A' using the specified window of type 'kind' and shape ('hei','wid'). Parameters ---------- A : 2D array Array to be smoothed. hei : integer Window height. Must be odd and greater than or equal to 3. wid : integer Window width. Must be odd and greater than or equal to 3. kind : string, optional One of the window types available in the numpy module: hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed. hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is discontinuous at the edges, and may produce undesired artifacts. blackman : Similar to the hann and hamming windows, with sharper ends. 
bartlett : Triangular-like. Its end-points are zeroed. kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter. For beta=0, the window is rectangular. As beta increases, the window gets narrower. Refer to the numpy functions for details about each window type. badflag : float, optional The bad data flag. Elements of the input array 'A' holding this value are ignored. beta : float, optional Shape parameter for the kaiser window. For windows other than the kaiser window, this parameter does nothing. Returns ------- As : 2D array The smoothed array. --------------------------------------- André Palóczy Filho (paloczy@gmail.com) April 2012 ============================================================================================================== """ ########################################### ### Checking window type and dimensions ### ########################################### kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser'] if ( kind not in kinds ): raise ValueError('Invalid window type requested: %s'%kind) if ( np.mod(hei,2) == 0 ) or ( np.mod(wid,2) == 0 ): raise ValueError('Window dimensions must be odd') if (hei <= 1) or (wid <= 1): raise ValueError('Window shape must be (3,3) or greater') ############################## ### Creating the 2D window ### ############################## if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required). wstr = 'np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))' else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required). if kind == 'hann': kind = 'hanning' # computing outer product to make a 2D window out of the original 1d windows. wstr = 'np.outer(np.' + kind + '(hei), np.' + kind + '(wid))' wdw = eval(wstr) A = np.asanyarray(A) Fnan = np.isnan(A) imax, jmax = A.shape As = np.nan*np.ones( (imax, jmax) ) for i in range(imax): for j in range(jmax): ### Default window parameters. wupp = 0 wlow = hei wlef = 0 wrig = wid lh = np.floor(hei/2) lw = np.floor(wid/2) ### Default array ranges (functions of the i,j indices). upp = i-lh low = i+lh+1 lef = j-lw rig = j+lw+1 ################################################## ### Tiling window and input array at the edges ### ################################################## # Upper edge. if upp < 0: wupp = wupp-upp upp = 0 # Left edge. if lef < 0: wlef = wlef-lef lef = 0 # Bottom edge. if low > imax: ex = low-imax wlow = wlow-ex low = imax # Right edge. if rig > jmax: ex = rig-jmax wrig = wrig-ex rig = jmax ############################################### ### Computing smoothed value at point (i,j) ### ############################################### Ac = A[upp:low, lef:rig] wdwc = wdw[wupp:wlow, wlef:wrig] fnan = np.isnan(Ac) Ac[fnan] = 0; wdwc[fnan] = 0 # Eliminating NaNs from mean computation. fbad = Ac==badflag wdwc[fbad] = 0 # Eliminating bad data from mean computation. a = Ac * wdwc As[i,j] = a.sum() / wdwc.sum() As[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array. return As def denan(arr): """ USAGE ----- denaned_arr = denan(arr) Remove the NaNs from an array. """ f = np.isnan(arr) return arr[~f] def standardize(series): """ USAGE ----- series2 = standardize(series) Standardizes a series by subtracting its mean value and dividing by its standard deviation. The result is a dimensionless series. Inputs can be of type "np.array", or "Pandas.Series"/"Pandas.TimeSeries". 
""" Mean, Std = series.mean(), series.std() return (series - Mean)/Std def linear_trend(series, return_line=True): """ USAGE ----- line = linear_trend(series, return_line=True) OR b, a, x = linear_trend(series, return_line=False) Returns the linear fit (line = b*x + a) associated with the 'series' array. Adapted from pylab.detrend_linear. """ series = np.asanyarray(series) x = np.arange(series.size, dtype=np.float_) C = np.cov(x, series, bias=1) # Covariance matrix. b = C[0, 1]/C[0, 0] # Angular coefficient. a = series.mean() - b*x.mean() # Linear coefficient. line = b*x + a if return_line: return line else: return b, a, x def thomas(A, b): """ USAGE ----- x = thomas(A,b) Solve Ax = b (where A is a tridiagonal matrix) using the Thomas Algorithm. References ---------- For a step-by-step derivation of the algorithm, see e.g., http://www3.ul.ie/wlee/ms6021_thomas.pdf """ # Step 1: Sweep rows from top to bottom, # calculating gammas and rhos along the way. N = b.size gam = [float(A[0,1]/A[0,0])] rho = [float(b[0]/A[0,0])] for i in range(0, N): rho.append(float((b[i] - A[i,i-1]*rho[-1])/(A[i,i] - A[i,i-1]*gam[-1]))) if i<N-1: # No gamma in the last row. gam.append(float(A[i,i+1]/(A[i,i] - A[i,i-1]*gam[-1]))) # Step 2: Substitute solutions for unknowns # starting from the bottom row all the way up. x = [] # Vector of unknowns. x.append(rho.pop()) # Last row is already solved. for i in range(N-2, -1, -1): x.append(float(rho.pop() - gam.pop()*x[-1])) x.reverse() return np.array(x) def topo_slope(lon, lat, h): """ USAGE ----- lons, lats, slope = topo_slope(lon, lat, h) Calculates bottom slope for a topography fields 'h' at coordinates ('lon', 'lat') using first-order finite differences. The output arrays have shape (M-1,L-1), where M,L = h.shape(). """ lon,lat,h = map(np.asanyarray, (lon,lat,h)) deg2m = 1852.*60. # m/deg. deg2rad = np.pi/180. # rad/deg. x = lon*deg2m*np.cos(lat*deg2rad) y = lat*deg2m # First-order differences, accurate to O(dx) and O(dy), # respectively. sx = (h[:,1:] - h[:,:-1]) / (x[:,1:] - x[:,:-1]) sy = (h[1:,:] - h[:-1,:]) / (y[1:,:] - y[:-1,:]) # Finding the values of the derivatives sx and sy # at the same location in physical space. sx = 0.5*(sx[1:,:]+sx[:-1,:]) sy = 0.5*(sy[:,1:]+sy[:,:-1]) # Calculating the bottom slope. slope = np.sqrt(sx**2 + sy**2) # Finding the lon,lat coordinates of the # values of the derivatives sx and sy. lons = 0.5*(lon[1:,:]+lon[:-1,:]) lats = 0.5*(lat[1:,:]+lat[:-1,:]) lons = 0.5*(lons[:,1:]+lons[:,:-1]) lats = 0.5*(lats[:,1:]+lats[:,:-1]) return lons, lats, slope def curvature_geometric(x, y): """ USAGE ----- k = curvature_geometric(x, y) Estimates the curvature k of a 2D curve (x,y) using a geometric method. If your curve is given by two arrays, x and y, you can approximate its curvature at each point by the reciprocal of the radius of a circumscribing triangle with that point, the preceding point, and the succeeding point as vertices. The radius of such a triangle is one fourth the product of the three sides divided by its area. The curvature will be positive for curvature to the left and negative for curvature to the right as you advance along the curve. Note that if your data are too closely spaced together or subject to substantial noise errors, this formula will not be very accurate. Author: Roger Stafford Source: http://www.mathworks.com/matlabcentral/newsreader/view_thread/125637 Translated to Python by André Palóczy, January 19, 2015. 
""" x,y = map(np.asanyarray, (x,y)) x1 = x[:-2]; x2 = x[1:-1]; x3 = x[2:] y1 = y[:-2]; y2 = y[1:-1]; y3 = y[2:] ## a, b, and c are the three sides of the triangle. a = np.sqrt((x3-x2)**2 + (y3-y2)**2) b = np.sqrt((x1-x3)**2 + (y1-y3)**2) c = np.sqrt((x2-x1)**2 + (y2-y1)**2) ## A is the area of the triangle. A = 0.5*(x1*y2 + x2*y3 + x3*y1 - x1*y3 - x2*y1 - x3*y2) ## The reciprocal of the circumscribed radius, i.e., the curvature. k = 4.0*A/(a*b*c) return np.squeeze(k) def get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw): """ USAGE ----- lon_isob, lat_isob = get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw) Retrieves the 'lon_isob','lat_isob' coordinates of a wanted 'iso' isobath from a topography array 'topo', with 'lon_topo','lat_topo' coordinates. """ lon, lat, topo = map(np.array, (lon, lat, topo)) fig, ax = plt.subplots() cs = ax.contour(lon, lat, topo, [iso]) coll = cs.collections[0] ## Test all lines to find thel ongest one. ## This is assumed to be the wanted isobath. ncoll = len(coll.get_paths()) siz = np.array([]) for n in range(ncoll): path = coll.get_paths()[n] siz = np.append(siz, path.vertices.shape[0]) f = siz.argmax() xiso = coll.get_paths()[f].vertices[:, 0] yiso = coll.get_paths()[f].vertices[:, 1] plt.close() # Smooth the isobath with a moving window. # Periodize according to window length to avoid losing edges. if smooth_isobath: fleft = window_length//2 fright = -window_length//2 + 1 if cyclic: xl = xiso[:fleft] + 360 xr = xiso[fright:] - 360 yl = yiso[:fleft] yr = yiso[fright:] xiso = np.concatenate((xr, xiso, xl)) yiso = np.concatenate((yr, yiso, yl)) # xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME # yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME # else: # xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME # yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME return xiso, yiso def angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw): """ USAGE ----- lon_isob, lat_isob, angle = angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw) Returns the coordinates ('lon_isob', 'lat_isob') and the angle an isobath makes with the zonal direction for a topography array 'h' at coordinates ('lon', 'lat'). Defaults to the 100 m isobath. If 'smooth_isobath'==True, smooths the isobath with a rolling window of type 'win_type' and 'window_length' points wide. All keyword arguments are passed to 'pandas.rolling_window()'. If 'plot_map'==True, plots a map showing the isobath (and its soothed version if smooth_isobath==True). """ lon, lat, h = map(np.array, (lon, lat, h)) R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius. deg2rad = np.pi/180. # [rad/deg] # Extract isobath coordinates xiso, yiso = get_isobath(lon, lat, h, isobath) if cyclic: # Add cyclic point. xiso = np.append(xiso, xiso[0]) yiso = np.append(yiso, yiso[0]) # Smooth the isobath with a moving window. 
if smooth_isobath: xiso = rolling_window(xiso, window=window_length, win_type=win_type, **kw) yiso = rolling_window(yiso, window=window_length, win_type=win_type, **kw) # From the coordinates of the isobath, find the angle it forms with the # zonal axis, using points k+1 and k. shth = yiso.size-1 theta = np.zeros(shth) for k in range(shth): dyk = R*(yiso[k+1]-yiso[k]) dxk = R*(xiso[k+1]-xiso[k])*np.cos(yiso[k]*deg2rad) theta[k] = np.arctan2(dyk,dxk) xisom = 0.5*(xiso[1:] + xiso[:-1]) yisom = 0.5*(yiso[1:] + yiso[:-1]) # Plots map showing the extracted isobath. if plot_map: fig, ax = plt.subplots() m = bb_map([lon.min(), lon.max()], [lat.min(), lat.max()], projection='cyl', resolution='h', ax=ax) m.plot(xisom, yisom, color='b', linestyle='-', zorder=3, latlon=True) input("Press any key to continue.") plt.close() return xisom, yisom, theta def isopyc_depth(z, dens0, isopyc=1027.75, dzref=1.): """ USAGE ----- hisopyc = isopyc_depth(z, dens0, isopyc=1027.75) Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc' (defaults to 1027.75 kg/m3) from a 3D density array rho0 (in kg/m3) with shape (nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz). 'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother the resolution of the returned isopycnal depth array 'hisopyc'. """ z, dens0 = map(np.asanyarray, (z, dens0)) ny, nx = dens0.shape[1:] zref = np.arange(z.min(), z.max(), dzref) if np.ma.isMaskedArray(dens0): dens0 = np.ma.filled(dens0, np.nan) hisopyc = np.nan*np.ones((ny,nx)) for j in range(ny): for i in range(nx): dens0ij = dens0[:,j,i] if np.logical_or(np.logical_or(isopyc<np.nanmin(dens0ij), np.nanmax(dens0ij)<isopyc), np.isnan(dens0ij).all()): continue else: dens0ref = np.interp(zref, z, dens0ij) # Refined density profile. dens0refn = near(dens0ref, isopyc) fz=dens0ref==dens0refn try: hisopyc[j,i] = zref[fz] except ValueError: print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (j=%d,i=%d)."%(fz.sum(), j, i)) hisopyc[j,i] = np.nanmedian(zref[fz]) return hisopyc def whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9): """ USAGE ----- whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9) Changes to white the color of the 'n' (defaults to 1) neighboring patches about the zero contour created by a command like 'cs = ax.contourf(x, y, z)'. """ x, y, z = map(np.asanyarray, (x,y,z)) white = (1.,1.,1.) cslevs = cs.levels assert 0. in cslevs f0=np.where(cslevs==0.)[0][0] f0m, f0p = f0-n, f0+n c0m, c0p = cslevs[f0m], cslevs[f0p] ax.contourf(x, y, z, levels=[c0m, c0p], linestyles='none', colors=[white, white], cmap=None, zorder=zorder) def wind2stress(u, v, formula='large_pond1981-modified'): """ USAGE ----- taux,tauy = wind2stress(u, v, formula='mellor2004') Converts u,v wind vector components to taux,tauy wind stress vector components. """ rho_air = 1.226 # kg/m3 mag = np.sqrt(u**2+v**2) # m/s Cd = np.zeros( mag.shape ) # Drag coefficient. if formula=='large_pond1981-modified': # Large and Pond (1981) formula # modified for light winds, as # in Trenberth et al. (1990). f=mag<=1. Cd[f] = 2.18e-3 f=np.logical_and(mag>1.,mag<3.) Cd[f] = (0.62+1.56/mag[f])*1e-3 f=np.logical_and(mag>=3.,mag<10.) Cd[f] = 1.14e-3 f=mag>=10. 
Cd[f] = (0.49 + 0.065*mag[f])*1e-3 elif formula=='mellor2004': Cd = 7.5e-4 + 6.7e-5*mag else: np.disp('Unknown formula for Cd.') pass # Computing wind stress [N/m2] taux = rho_air*Cd*mag*u tauy = rho_air*Cd*mag*v return taux,tauy def gen_dates(start, end, dt='day', input_datetime=False): """ Returns a list of datetimes within the date range from `start` to `end`, at a `dt` time interval. `dt` can be 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'. If `input_datetime` is False (default), `start` and `end` must be a date in string form. If `input_datetime` is True, `start` and `end` must be datetime objects. Note ---- Modified from original function by Filipe Fernandes (ocefpaf@gmail.com). Example ------- >>> from ap_tools.utils import gen_dates >>> from datetime import datetime >>> start = '1989-08-19' >>> end = datetime.utcnow().strftime("%Y-%m-%d") >>> gen_dates(start, end, dt='day') """ DT = dict(second=rrule.SECONDLY, minute=rrule.MINUTELY, hour=rrule.HOURLY, day=rrule.DAILY, week=rrule.WEEKLY, month=rrule.MONTHLY, year=rrule.YEARLY) dt = DT[dt] if input_datetime: # Input are datetime objects. No parsing needed. dates = rrule.rrule(dt, dtstart=start, until=end) else: # Input in string form, parse into datetime objects. dates = rrule.rrule(dt, dtstart=parser.parse(start), until=parser.parse(end)) return list(dates) def fmt_isobath(cs, fontsize=8, fmt='%g', inline=True, inline_spacing=7, manual=True, **kw): """ Formats the labels of isobath contours. `manual` is set to `True` by default, but can be `False`, or a tuple/list of tuples with the coordinates of the labels. All options are passed to plt.clabel(). """ isobstrH = plt.clabel(cs, fontsize=fontsize, fmt=fmt, inline=inline, \ inline_spacing=inline_spacing, manual=manual, **kw) for ih in range(0, len(isobstrH)): # Appends 'm' for meters at the end of the label. isobstrh = isobstrH[ih] isobstr = isobstrh.get_text() isobstr = isobstr.replace('-','') + ' m' isobstrh.set_text(isobstr) def float2latex(f, ndigits=1): """ USAGE ----- texstr = float2latex(f, ndigits=1) Converts a float input into a latex-formatted string with 'ndigits' (defaults to 1). Adapted from: http://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python """ float_str = "{0:.%se}"%ndigits float_str = float_str.format(f) base, exponent = float_str.split("e") return "${0} \times 10^{{{1}}}$".format(base, int(exponent)) def mat2npz(matname): """ USAGE ----- mat2npz(matname) Extract variables stored in a .mat file, and saves them in a .npz file. """ d = loadmat(matname) _ = d.pop('__header__') _ = d.pop('__globals__') _ = d.pop('__version__') npzname = matname[:-4] + '.npz' np.savez(npzname,**d) return None def bb_map(lons, lats, ax, projection='merc', resolution='i', drawparallels=True, drawmeridians=True): """ USAGE ----- m = bb_map(lons, lats, **kwargs) Returns a Basemap instance with lon,lat bounding limits inferred from the input arrays `lons`,`lats`. Coastlines, countries, states, parallels and meridians are drawn, and continents are filled. """ lons,lats = map(np.asanyarray, (lons,lats)) lonmin,lonmax = lons.min(),lons.max() latmin,latmax = lats.min(),lats.max() m = Basemap(llcrnrlon=lonmin, urcrnrlon=lonmax, llcrnrlat=latmin, urcrnrlat=latmax, projection=projection, resolution=resolution, ax=ax) plt.ioff() # Avoid showing the figure. 
    m.fillcontinents(color='0.9', zorder=9)
    m.drawcoastlines(zorder=10)
    m.drawstates(zorder=10)
    m.drawcountries(linewidth=2.0, zorder=10)
    m.drawmapboundary(zorder=9999)
    if drawmeridians:
        m.drawmeridians(np.arange(np.floor(lonmin), np.ceil(lonmax), 1),
                        linewidth=0.15, labels=[1, 0, 1, 0], zorder=12)
    if drawparallels:
        m.drawparallels(np.arange(np.floor(latmin), np.ceil(latmax), 1),
                        linewidth=0.15, labels=[1, 0, 0, 0], zorder=12)
    plt.ion()
    return m

def dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r',
                   marker='o', markersize=5):
    """
    USAGE
    -----
    dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r')

    Plots dots colored with a dual-color criterion,
    separated by a threshold value.
    """
    ax = plt.gca()
    # Below-threshold dots.
    f = z<=thresh
    ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize,
            mfc=color_low, mec=color_low)
    # Above-threshold dots.
    f = z>thresh
    ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize,
            mfc=color_high, mec=color_high)

if __name__=='__main__':
    import doctest
    doctest.testmod()
mit
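The NaN-aware window smoothing that smoo1 implements in the file above reduces to a normalized weighted average in which only the valid samples of each window contribute. A minimal standalone sketch of that idea follows (numpy only; the function name hann_smooth_nan and the toy input are illustrative and are not part of the original module):

import numpy as np

def hann_smooth_nan(x, N=5):
    # Odd window so it can be centered on each sample.
    assert N % 2 == 1, "window size must be odd"
    w = np.hanning(N)
    half = N // 2
    x = np.asarray(x, dtype=float)
    xs = np.full_like(x, np.nan)
    for i in range(x.size):
        lo, hi = max(i - half, 0), min(i + half + 1, x.size)
        xx = x[lo:hi]
        ww = w[half - (i - lo): half + (hi - i)]
        good = ~np.isnan(xx)
        if good.any() and np.sum(ww[good]) > 0:
            # Renormalize by the weights of the valid points only,
            # so missing values do not bias the mean.
            xs[i] = np.sum(xx[good] * ww[good]) / np.sum(ww[good])
    return xs

print(hann_smooth_nan(np.array([1., 2., np.nan, 4., 5., 6., 7.]), N=5))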
CIFASIS/pylearn2
pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py
39
5044
""" WRITEME """ import logging from ..linear import LinearTransform from .unshared_conv import FilterActs, ImgActs from theano.compat.six.moves import xrange from theano.sandbox import cuda if cuda.cuda_available: import gpu_unshared_conv # register optimizations import numpy as np import warnings try: import matplotlib.pyplot as plt except (RuntimeError, ImportError, TypeError) as matplotlib_exception: warnings.warn("Unable to import matplotlib. Some features unavailable. " "Original exception: " + str(matplotlib_exception)) logger = logging.getLogger(__name__) class LocalDot(LinearTransform): """ LocalDot is an linear operation computationally similar to convolution in the spatial domain, except that whereas convolution applying a single filter or set of filters across an image, the LocalDot has different filterbanks for different points in the image. Mathematically, this is a general linear transform except for a restriction that filters are 0 outside of a spatially localized patch within the image. Image shape is 5-tuple: color_groups colors_per_group rows cols images Filterbank shape is 7-tuple (!) 0 row_positions 1 col_positions 2 colors_per_group 3 height 4 width 5 color_groups 6 filters_per_group The result of left-multiplication a 5-tuple with shape: filter_groups filters_per_group row_positions col_positions images Parameters ---------- filters : WRITEME irows : WRITEME Image rows icols : WRITEME Image columns subsample : WRITEME padding_start : WRITEME filters_shape : WRITEME message : WRITEME """ def __init__(self, filters, irows, icols=None, subsample=(1, 1), padding_start=None, filters_shape=None, message=""): LinearTransform.__init__(self, [filters]) self._filters = filters if filters_shape is None: self._filters_shape = tuple(filters.get_value(borrow=True).shape) else: self._filters_shape = tuple(filters_shape) self._irows = irows if icols is None: self._icols = irows else: self._icols = icols if self._icols != self._irows: raise NotImplementedError('GPU code at least needs square imgs') self._subsample = tuple(subsample) self._padding_start = padding_start if len(self._filters_shape) != 7: raise TypeError('need 7-tuple filter shape', self._filters_shape) if self._subsample[0] != self._subsample[1]: raise ValueError('subsampling must be same in rows and cols') self._filter_acts = FilterActs(self._subsample[0]) self._img_acts = ImgActs(module_stride=self._subsample[0]) if message: self._message = message else: self._message = filters.name def rmul(self, x): """ .. todo:: WRITEME """ assert x.ndim == 5 return self._filter_acts(x, self._filters) def rmul_T(self, x): """ .. todo:: WRITEME """ return self._img_acts(self._filters, x, self._irows, self._icols) def col_shape(self): """ .. todo:: WRITEME """ ishape = self.row_shape() + (-99,) fshape = self._filters_shape hshape, = self._filter_acts.infer_shape(None, (ishape, fshape)) assert hshape[-1] == -99 return hshape[:-1] def row_shape(self): """ .. todo:: WRITEME """ fshape = self._filters_shape fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2] fgroups, filters_per_group = fshape[-2:] return fgroups, fcolors, self._irows, self._icols def print_status(self): """ .. todo:: WRITEME """ raise NotImplementedError("TODO: fix dependence on non-existent " "ndarray_status function") """print ndarray_status( self._filters.get_value(borrow=True), msg='%s{%s}'% (self.__class__.__name__, self._message)) """ def imshow_gray(self): """ .. 
todo:: WRITEME """ filters = self._filters.get_value() modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape logger.info(filters.shape) rval = np.zeros(( modR * (rows + 1) - 1, modC * (cols + 1) - 1, )) for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)): for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)): rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0] plt.imshow(rval, cmap='gray') return rval
bsd-3-clause
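LocalDot above wraps Theano ops, but the core idea it describes, an unshared ("locally connected") filter bank with a different filter at every image position, can be shown with plain numpy. The sketch below is conceptual only, does not use the class API, and all shapes are made up for the example:

import numpy as np

rng = np.random.default_rng(0)
H = W = 6            # image size
fh = fw = 3          # patch size
oh, ow = H - fh + 1, W - fw + 1   # output positions (stride 1, no padding)

img = rng.standard_normal((H, W))
# One independent filter per output position -> shape (oh, ow, fh, fw),
# unlike convolution, which reuses a single (fh, fw) filter everywhere.
filters = rng.standard_normal((oh, ow, fh, fw))

out = np.empty((oh, ow))
for r in range(oh):
    for c in range(ow):
        patch = img[r:r + fh, c:c + fw]
        out[r, c] = np.sum(patch * filters[r, c])   # position-specific weights
print(out.shape)   # (4, 4)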
leesavide/pythonista-docs
Documentation/matplotlib/mpl_examples/api/custom_scale_example.py
9
6401
from __future__ import unicode_literals import numpy as np from numpy import ma from matplotlib import scale as mscale from matplotlib import transforms as mtransforms from matplotlib.ticker import Formatter, FixedLocator class MercatorLatitudeScale(mscale.ScaleBase): """ Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using the system used to scale latitudes in a Mercator projection. The scale function: ln(tan(y) + sec(y)) The inverse scale function: atan(sinh(y)) Since the Mercator scale tends to infinity at +/- 90 degrees, there is user-defined threshold, above and below which nothing will be plotted. This defaults to +/- 85 degrees. source: http://en.wikipedia.org/wiki/Mercator_projection """ # The scale class must have a member ``name`` that defines the # string used to select the scale. For example, # ``gca().set_yscale("mercator")`` would be used to select this # scale. name = 'mercator' def __init__(self, axis, **kwargs): """ Any keyword arguments passed to ``set_xscale`` and ``set_yscale`` will be passed along to the scale's constructor. thresh: The degree above which to crop the data. """ mscale.ScaleBase.__init__(self) thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi) if thresh >= np.pi / 2.0: raise ValueError("thresh must be less than pi/2") self.thresh = thresh def get_transform(self): """ Override this method to return a new instance that does the actual transformation of the data. The MercatorLatitudeTransform class is defined below as a nested class of this one. """ return self.MercatorLatitudeTransform(self.thresh) def set_default_locators_and_formatters(self, axis): """ Override to set up the locators and formatters to use with the scale. This is only required if the scale requires custom locators and formatters. Writing custom locators and formatters is rather outside the scope of this example, but there are many helpful examples in ``ticker.py``. In our case, the Mercator example uses a fixed locator from -90 to 90 degrees and a custom formatter class to put convert the radians to degrees and put a degree symbol after the value:: """ class DegreeFormatter(Formatter): def __call__(self, x, pos=None): # \u00b0 : degree symbol return "%d\u00b0" % ((x / np.pi) * 180.0) deg2rad = np.pi / 180.0 axis.set_major_locator(FixedLocator( np.arange(-90, 90, 10) * deg2rad)) axis.set_major_formatter(DegreeFormatter()) axis.set_minor_formatter(DegreeFormatter()) def limit_range_for_scale(self, vmin, vmax, minpos): """ Override to limit the bounds of the axis to the domain of the transform. In the case of Mercator, the bounds should be limited to the threshold that was passed in. Unlike the autoscaling provided by the tick locators, this range limiting will always be adhered to, whether the axis range is set manually, determined automatically or changed through panning and zooming. """ return max(vmin, -self.thresh), min(vmax, self.thresh) class MercatorLatitudeTransform(mtransforms.Transform): # There are two value members that must be defined. # ``input_dims`` and ``output_dims`` specify number of input # dimensions and output dimensions to the transformation. # These are used by the transformation framework to do some # error checking and prevent incompatible transformations from # being connected together. When defining transforms for a # scale, which are, by definition, separable and have only one # dimension, these members should always be set to 1. 
input_dims = 1 output_dims = 1 is_separable = True def __init__(self, thresh): mtransforms.Transform.__init__(self) self.thresh = thresh def transform_non_affine(self, a): """ This transform takes an Nx1 ``numpy`` array and returns a transformed copy. Since the range of the Mercator scale is limited by the user-specified threshold, the input array must be masked to contain only valid values. ``matplotlib`` will handle masked arrays and remove the out-of-range data from the plot. Importantly, the ``transform`` method *must* return an array that is the same shape as the input array, since these values need to remain synchronized with values in the other dimension. """ masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a) if masked.mask.any(): return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked))) else: return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a))) def inverted(self): """ Override this method so matplotlib knows how to get the inverse transform for this transform. """ return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh) class InvertedMercatorLatitudeTransform(mtransforms.Transform): input_dims = 1 output_dims = 1 is_separable = True def __init__(self, thresh): mtransforms.Transform.__init__(self) self.thresh = thresh def transform_non_affine(self, a): return np.arctan(np.sinh(a)) def inverted(self): return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh) # Now that the Scale class has been defined, it must be registered so # that ``matplotlib`` can find it. mscale.register_scale(MercatorLatitudeScale) if __name__ == '__main__': import matplotlib.pyplot as plt t = np.arange(-180.0, 180.0, 0.1) s = t / 360.0 * np.pi plt.plot(t, s, '-', lw=2) plt.gca().set_yscale('mercator') plt.xlabel('Longitude') plt.ylabel('Latitude') plt.title('Mercator: Projection of the Oppressor') plt.grid(True) plt.show()
apache-2.0
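The scale above relies on the forward map ln(tan(y) + sec(y)) and the inverse atan(sinh(y)). A quick numerical sanity check of that pair (a standalone sketch, not part of the example file) is:

import numpy as np

lat = np.linspace(-1.4, 1.4, 7)            # radians, inside +/- ~80 degrees
fwd = np.log(np.abs(np.tan(lat) + 1.0 / np.cos(lat)))
back = np.arctan(np.sinh(fwd))
print(np.allclose(back, lat))              # True: the two maps are inverses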
toastedcornflakes/scikit-learn
sklearn/linear_model/tests/test_base.py
83
15089
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import linalg from itertools import product from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import ignore_warnings from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.base import _preprocess_data from sklearn.linear_model.base import sparse_center_data, center_data from sklearn.linear_model.base import _rescale_data from sklearn.utils import check_random_state from sklearn.utils.testing import assert_greater from sklearn.datasets.samples_generator import make_sparse_uncorrelated from sklearn.datasets.samples_generator import make_regression rng = np.random.RandomState(0) def test_linear_regression(): # Test LinearRegression on a simple dataset. # a simple dataset X = [[1], [2]] Y = [1, 2] reg = LinearRegression() reg.fit(X, Y) assert_array_almost_equal(reg.coef_, [1]) assert_array_almost_equal(reg.intercept_, [0]) assert_array_almost_equal(reg.predict(X), [1, 2]) # test it also for degenerate input X = [[1]] Y = [0] reg = LinearRegression() reg.fit(X, Y) assert_array_almost_equal(reg.coef_, [0]) assert_array_almost_equal(reg.intercept_, [0]) assert_array_almost_equal(reg.predict(X), [0]) def test_linear_regression_sample_weights(): # TODO: loop over sparse data as well rng = np.random.RandomState(0) # It would not work with under-determined systems for n_samples, n_features in ((6, 5), ): y = rng.randn(n_samples) X = rng.randn(n_samples, n_features) sample_weight = 1.0 + rng.rand(n_samples) for intercept in (True, False): # LinearRegression with explicit sample_weight reg = LinearRegression(fit_intercept=intercept) reg.fit(X, y, sample_weight=sample_weight) coefs1 = reg.coef_ inter1 = reg.intercept_ assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks assert_greater(reg.score(X, y), 0.5) # Closed form of the weighted least square # theta = (X^T W X)^(-1) * X^T W y W = np.diag(sample_weight) if intercept is False: X_aug = X else: dummy_column = np.ones(shape=(n_samples, 1)) X_aug = np.concatenate((dummy_column, X), axis=1) coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug), X_aug.T.dot(W).dot(y)) if intercept is False: assert_array_almost_equal(coefs1, coefs2) else: assert_array_almost_equal(coefs1, coefs2[1:]) assert_almost_equal(inter1, coefs2[0]) def test_raises_value_error_if_sample_weights_greater_than_1d(): # Sample weights must be either scalar or 1D n_sampless = [2, 3] n_featuress = [3, 2] for n_samples, n_features in zip(n_sampless, n_featuress): X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) sample_weights_OK = rng.randn(n_samples) ** 2 + 1 sample_weights_OK_1 = 1. sample_weights_OK_2 = 2. reg = LinearRegression() # make sure the "OK" sample weights actually work reg.fit(X, y, sample_weights_OK) reg.fit(X, y, sample_weights_OK_1) reg.fit(X, y, sample_weights_OK_2) def test_fit_intercept(): # Test assertions on betas shape. 
X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) X3 = np.array([[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]]) y = np.array([1, 1]) lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y) lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y) assert_equal(lr2_with_intercept.coef_.shape, lr2_without_intercept.coef_.shape) assert_equal(lr3_with_intercept.coef_.shape, lr3_without_intercept.coef_.shape) assert_equal(lr2_without_intercept.coef_.ndim, lr3_without_intercept.coef_.ndim) def test_linear_regression_sparse(random_state=0): # Test that linear regression also works with sparse data random_state = check_random_state(random_state) for i in range(10): n = 100 X = sparse.eye(n, n) beta = random_state.rand(n) y = X * beta[:, np.newaxis] ols = LinearRegression() ols.fit(X, y.ravel()) assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) assert_array_almost_equal(ols.predict(X) - y.ravel(), 0) def test_linear_regression_multiple_outcome(random_state=0): # Test multiple-outcome linear regressions X, y = make_regression(random_state=random_state) Y = np.vstack((y, y)).T n_features = X.shape[1] reg = LinearRegression(fit_intercept=True) reg.fit((X), Y) assert_equal(reg.coef_.shape, (2, n_features)) Y_pred = reg.predict(X) reg.fit(X, y) y_pred = reg.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_linear_regression_sparse_multiple_outcome(random_state=0): # Test multiple-outcome linear regressions with sparse data random_state = check_random_state(random_state) X, y = make_sparse_uncorrelated(random_state=random_state) X = sparse.coo_matrix(X) Y = np.vstack((y, y)).T n_features = X.shape[1] ols = LinearRegression() ols.fit(X, Y) assert_equal(ols.coef_.shape, (2, n_features)) Y_pred = ols.predict(X) ols.fit(X, y.ravel()) y_pred = ols.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_preprocess_data(): n_samples = 200 n_features = 2 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) expected_X_mean = np.mean(X, axis=0) expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0]) expected_y_mean = np.mean(y, axis=0) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_norm, np.ones(n_features)) assert_array_almost_equal(Xt, X) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_norm, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_norm, expected_X_norm) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm) assert_array_almost_equal(yt, y - expected_y_mean) def test_preprocess_data_multioutput(): n_samples = 200 n_features = 3 n_outputs = 2 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, 
n_outputs) expected_y_mean = np.mean(y, axis=0) args = [X, sparse.csc_matrix(X)] for X in args: _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(y_mean, np.zeros(n_outputs)) assert_array_almost_equal(yt, y) _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) def test_preprocess_data_weighted(): n_samples = 200 n_features = 2 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) sample_weight = rng.rand(n_samples) expected_X_mean = np.average(X, axis=0, weights=sample_weight) expected_y_mean = np.average(y, axis=0, weights=sample_weight) # XXX: if normalize=True, should we expect a weighted standard deviation? # Currently not weighted, but calculated with respect to weighted mean expected_X_norm = (np.sqrt(X.shape[0]) * np.mean((X - expected_X_mean) ** 2, axis=0) ** .5) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=False, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_norm, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=True, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_norm, expected_X_norm) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm) assert_array_almost_equal(yt, y - expected_y_mean) def test_sparse_preprocess_data_with_return_mean(): n_samples = 200 n_features = 2 # random_state not supported yet in sparse.rand X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng X = X.tolil() y = rng.rand(n_samples) XA = X.toarray() expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0]) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=False, normalize=False, return_mean=True) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_norm, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=False, return_mean=True) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_norm, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) Xt, yt, X_mean, y_mean, X_norm = \ _preprocess_data(X, y, fit_intercept=True, normalize=True, return_mean=True) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_norm, expected_X_norm) assert_array_almost_equal(Xt.A, XA / expected_X_norm) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) def test_csr_preprocess_data(): # Test output format of _preprocess_data, when input is csr X, y = make_regression() X[X < 2.5] = 0.0 csr = sparse.csr_matrix(X) csr_, y, _, _, _ = _preprocess_data(csr, y, True) 
assert_equal(csr_.getformat(), 'csr') def test_rescale_data(): n_samples = 200 n_features = 2 sample_weight = 1.0 + rng.rand(n_samples) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight) rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis] rescaled_y2 = y * np.sqrt(sample_weight) assert_array_almost_equal(rescaled_X, rescaled_X2) assert_array_almost_equal(rescaled_y, rescaled_y2) @ignore_warnings # all deprecation warnings def test_deprecation_center_data(): n_samples = 200 n_features = 2 w = 1.0 + rng.rand(n_samples) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) param_grid = product([True, False], [True, False], [True, False], [None, w]) for (fit_intercept, normalize, copy, sample_weight) in param_grid: XX = X.copy() # such that we can try copy=False as well X1, y1, X1_mean, X1_var, y1_mean = \ center_data(XX, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy, sample_weight=sample_weight) XX = X.copy() X2, y2, X2_mean, X2_var, y2_mean = \ _preprocess_data(XX, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy, sample_weight=sample_weight) assert_array_almost_equal(X1, X2) assert_array_almost_equal(y1, y2) assert_array_almost_equal(X1_mean, X2_mean) assert_array_almost_equal(X1_var, X2_var) assert_array_almost_equal(y1_mean, y2_mean) # Sparse cases X = sparse.csr_matrix(X) for (fit_intercept, normalize, copy, sample_weight) in param_grid: X1, y1, X1_mean, X1_var, y1_mean = \ center_data(X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy, sample_weight=sample_weight) X2, y2, X2_mean, X2_var, y2_mean = \ _preprocess_data(X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy, sample_weight=sample_weight, return_mean=False) assert_array_almost_equal(X1.toarray(), X2.toarray()) assert_array_almost_equal(y1, y2) assert_array_almost_equal(X1_mean, X2_mean) assert_array_almost_equal(X1_var, X2_var) assert_array_almost_equal(y1_mean, y2_mean) for (fit_intercept, normalize) in product([True, False], [True, False]): X1, y1, X1_mean, X1_var, y1_mean = \ sparse_center_data(X, y, fit_intercept=fit_intercept, normalize=normalize) X2, y2, X2_mean, X2_var, y2_mean = \ _preprocess_data(X, y, fit_intercept=fit_intercept, normalize=normalize, return_mean=True) assert_array_almost_equal(X1.toarray(), X2.toarray()) assert_array_almost_equal(y1, y2) assert_array_almost_equal(X1_mean, X2_mean) assert_array_almost_equal(X1_var, X2_var) assert_array_almost_equal(y1_mean, y2_mean)
bsd-3-clause
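The sample-weight test above compares LinearRegression against the closed form theta = (X^T W X)^(-1) X^T W y. A condensed version of that check (fit_intercept=False to keep the algebra short; the random data are purely illustrative):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = rng.randn(20)
w = 1.0 + rng.rand(20)

reg = LinearRegression(fit_intercept=False).fit(X, y, sample_weight=w)
W = np.diag(w)
theta = np.linalg.solve(X.T.dot(W).dot(X), X.T.dot(W).dot(y))
print(np.allclose(reg.coef_, theta))   # True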
kylerbrown/scikit-learn
sklearn/feature_selection/tests/test_rfe.py
209
11733
""" Testing Recursive feature elimination """ import warnings import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal from nose.tools import assert_equal, assert_true from scipy import sparse from sklearn.feature_selection.rfe import RFE, RFECV from sklearn.datasets import load_iris, make_friedman1 from sklearn.metrics import zero_one_loss from sklearn.svm import SVC, SVR from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import cross_val_score from sklearn.utils import check_random_state from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.metrics import make_scorer from sklearn.metrics import get_scorer class MockClassifier(object): """ Dummy classifier to test recursive feature ellimination """ def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) self.coef_ = np.ones(X.shape[1], dtype=np.float64) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=True): return {'foo_param': self.foo_param} def set_params(self, **params): return self def test_rfe_set_params(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) y_pred = rfe.fit(X, y).predict(X) clf = SVC() with warnings.catch_warnings(record=True): # estimator_params is deprecated rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1, estimator_params={'kernel': 'linear'}) y_pred2 = rfe.fit(X, y).predict(X) assert_array_equal(y_pred, y_pred2) def test_rfe_features_importance(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2) rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) assert_equal(len(rfe.ranking_), X.shape[1]) clf_svc = SVC(kernel="linear") rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1) rfe_svc.fit(X, y) # Check if the supports are equal assert_array_equal(rfe.get_support(), rfe_svc.get_support()) def test_rfe_deprecation_estimator_params(): deprecation_message = ("The parameter 'estimator_params' is deprecated as " "of version 0.16 and will be removed in 0.18. 
The " "parameter is no longer necessary because the " "value is set via the estimator initialisation or " "set_params method.") generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target assert_warns_message(DeprecationWarning, deprecation_message, RFE(estimator=SVC(), n_features_to_select=4, step=0.1, estimator_params={'kernel': 'linear'}).fit, X=X, y=y) assert_warns_message(DeprecationWarning, deprecation_message, RFECV(estimator=SVC(), step=1, cv=5, estimator_params={'kernel': 'linear'}).fit, X=X, y=y) def test_rfe(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] X_sparse = sparse.csr_matrix(X) y = iris.target # dense model clf = SVC(kernel="linear") rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) # sparse model clf_sparse = SVC(kernel="linear") rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1) rfe_sparse.fit(X_sparse, y) X_r_sparse = rfe_sparse.transform(X_sparse) assert_equal(X_r.shape, iris.data.shape) assert_array_almost_equal(X_r[:10], iris.data[:10]) assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data)) assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target)) assert_array_almost_equal(X_r, X_r_sparse.toarray()) def test_rfe_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = iris.target # dense model clf = MockClassifier() rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1) rfe.fit(X, y) X_r = rfe.transform(X) clf.fit(X_r, y) assert_equal(len(rfe.ranking_), X.shape[1]) assert_equal(X_r.shape, iris.data.shape) def test_rfecv(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) X_r = rfecv.transform(X) # All the noisy variable were filtered out assert_array_equal(X_r, iris.data) # same in sparse rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) # Test using a customized loss function scoring = make_scorer(zero_one_loss, greater_is_better=False) rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scoring) ignore_warnings(rfecv.fit)(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test using a scorer scorer = get_scorer('accuracy') rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=scorer) rfecv.fit(X, y) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) # Test fix on grid_scores def test_scorer(estimator, X, y): return 1.0 rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, scoring=test_scorer) rfecv.fit(X, y) assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_))) # Same as the first two tests, but with step=2 rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) rfecv.fit(X, y) assert_equal(len(rfecv.grid_scores_), 6) assert_equal(len(rfecv.ranking_), 
X.shape[1]) X_r = rfecv.transform(X) assert_array_equal(X_r, iris.data) rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5) X_sparse = sparse.csr_matrix(X) rfecv_sparse.fit(X_sparse, y) X_r_sparse = rfecv_sparse.transform(X_sparse) assert_array_equal(X_r_sparse.toarray(), iris.data) def test_rfecv_mockclassifier(): generator = check_random_state(0) iris = load_iris() X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))] y = list(iris.target) # regression test: list should be supported # Test using the score function rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5) rfecv.fit(X, y) # non-regression test for missing worst feature: assert_equal(len(rfecv.grid_scores_), X.shape[1]) assert_equal(len(rfecv.ranking_), X.shape[1]) def test_rfe_estimator_tags(): rfe = RFE(SVC(kernel='linear')) assert_equal(rfe._estimator_type, "classifier") # make sure that cross-validation is stratified iris = load_iris() score = cross_val_score(rfe, iris.data, iris.target) assert_greater(score.min(), .7) def test_rfe_min_step(): n_features = 10 X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0) n_samples, n_features = X.shape estimator = SVR(kernel="linear") # Test when floor(step * n_features) <= 0 selector = RFE(estimator, step=0.01) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is between (0,1) and floor(step * n_features) > 0 selector = RFE(estimator, step=0.20) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) # Test when step is an integer selector = RFE(estimator, step=5) sel = selector.fit(X, y) assert_equal(sel.support_.sum(), n_features // 2) def test_number_of_subsets_of_features(): # In RFE, 'number_of_subsets_of_features' # = the number of iterations in '_fit' # = max(ranking_) # = 1 + (n_features + step - n_features_to_select - 1) // step # After optimization #4534, this number # = 1 + np.ceil((n_features - n_features_to_select) / float(step)) # This test case is to test their equivalence, refer to #4534 and #3824 def formula1(n_features, n_features_to_select, step): return 1 + ((n_features + step - n_features_to_select - 1) // step) def formula2(n_features, n_features_to_select, step): return 1 + np.ceil((n_features - n_features_to_select) / float(step)) # RFE # Case 1, n_features - n_features_to_select is divisible by step # Case 2, n_features - n_features_to_select is not divisible by step n_features_list = [11, 11] n_features_to_select_list = [3, 3] step_list = [2, 3] for n_features, n_features_to_select, step in zip( n_features_list, n_features_to_select_list, step_list): generator = check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=n_features_to_select, step=step) rfe.fit(X, y) # this number also equals to the maximum of ranking_ assert_equal(np.max(rfe.ranking_), formula1(n_features, n_features_to_select, step)) assert_equal(np.max(rfe.ranking_), formula2(n_features, n_features_to_select, step)) # In RFECV, 'fit' calls 'RFE._fit' # 'number_of_subsets_of_features' of RFE # = the size of 'grid_scores' of RFECV # = the number of iterations of the for loop before optimization #4534 # RFECV, n_features_to_select = 1 # Case 1, n_features - 1 is divisible by step # Case 2, n_features - 1 is not divisible by step n_features_to_select = 1 n_features_list = [11, 10] step_list = [2, 2] for n_features, step in zip(n_features_list, step_list): generator = 
check_random_state(43) X = generator.normal(size=(100, n_features)) y = generator.rand(100).round() rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5) rfecv.fit(X, y) assert_equal(rfecv.grid_scores_.shape[0], formula1(n_features, n_features_to_select, step)) assert_equal(rfecv.grid_scores_.shape[0], formula2(n_features, n_features_to_select, step))
bsd-3-clause
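A minimal end-to-end use of RFE mirroring the pattern exercised in the tests above: the iris features are padded with noise columns and the selector is asked to keep four. This is a usage sketch, not code taken from the test file:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.feature_selection import RFE

rng = np.random.RandomState(0)
iris = load_iris()
X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]

rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=4, step=1)
rfe.fit(X, iris.target)
print(rfe.support_)    # the four original iris columns should be selected
print(rfe.ranking_)    # noise columns get ranks > 1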
giorgiop/scikit-learn
sklearn/linear_model/__init__.py
83
3139
""" The :mod:`sklearn.linear_model` module implements generalized linear models. It includes Ridge regression, Bayesian Regression, Lasso and Elastic Net estimators computed with Least Angle Regression and coordinate descent. It also implements Stochastic Gradient Descent related algorithms. """ # See http://scikit-learn.sourceforge.net/modules/sgd.html and # http://scikit-learn.sourceforge.net/modules/linear_model.html for # complete documentation. from .base import LinearRegression from .bayes import BayesianRidge, ARDRegression from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV, LassoLarsIC) from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV, lasso_path, enet_path, MultiTaskLasso, MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLassoCV) from .huber import HuberRegressor from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber from .stochastic_gradient import SGDClassifier, SGDRegressor from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV, ridge_regression) from .logistic import (LogisticRegression, LogisticRegressionCV, logistic_regression_path) from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV) from .passive_aggressive import PassiveAggressiveClassifier from .passive_aggressive import PassiveAggressiveRegressor from .perceptron import Perceptron from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression, lasso_stability_path) from .ransac import RANSACRegressor from .theil_sen import TheilSenRegressor __all__ = ['ARDRegression', 'BayesianRidge', 'ElasticNet', 'ElasticNetCV', 'Hinge', 'HuberRegressor', 'Lars', 'LarsCV', 'Lasso', 'LassoCV', 'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'LinearRegression', 'Log', 'LogisticRegression', 'LogisticRegressionCV', 'ModifiedHuber', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV', 'PassiveAggressiveClassifier', 'PassiveAggressiveRegressor', 'Perceptron', 'RandomizedLasso', 'RandomizedLogisticRegression', 'Ridge', 'RidgeCV', 'RidgeClassifier', 'RidgeClassifierCV', 'SGDClassifier', 'SGDRegressor', 'SquaredLoss', 'TheilSenRegressor', 'enet_path', 'lars_path', 'lasso_path', 'lasso_stability_path', 'logistic_regression_path', 'orthogonal_mp', 'orthogonal_mp_gram', 'ridge_regression', 'RANSACRegressor']
bsd-3-clause
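Two of the estimators exported above, fitted on a small synthetic problem, just to show the import surface in use (the data and alpha values are arbitrary):

import numpy as np
from sklearn.linear_model import Ridge, Lasso

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = X.dot(np.array([1.5, 0.0, -2.0, 0.0])) + 0.01 * rng.randn(50)

print(Ridge(alpha=1.0).fit(X, y).coef_)
print(Lasso(alpha=0.1).fit(X, y).coef_)   # near-zero weights on columns 1 and 3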
jonyroda97/redbot-amigosprovaveis
lib/matplotlib/units.py
2
6084
""" The classes here provide support for using custom classes with matplotlib, e.g., those that do not expose the array interface but know how to convert themselves to arrays. It also supports classes with units and units conversion. Use cases include converters for custom objects, e.g., a list of datetime objects, as well as for objects that are unit aware. We don't assume any particular units implementation; rather a units implementation must provide the register with the Registry converter dictionary and a ConversionInterface. For example, here is a complete implementation which supports plotting with native datetime objects:: import matplotlib.units as units import matplotlib.dates as dates import matplotlib.ticker as ticker import datetime class DateConverter(units.ConversionInterface): @staticmethod def convert(value, unit, axis): 'convert value to a scalar or array' return dates.date2num(value) @staticmethod def axisinfo(unit, axis): 'return major and minor tick locators and formatters' if unit!='date': return None majloc = dates.AutoDateLocator() majfmt = dates.AutoDateFormatter(majloc) return AxisInfo(majloc=majloc, majfmt=majfmt, label='date') @staticmethod def default_units(x, axis): 'return the default unit for x or None' return 'date' # finally we register our object type with a converter units.registry[datetime.date] = DateConverter() """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six from matplotlib.cbook import iterable, is_numlike, safe_first_element import numpy as np class AxisInfo(object): """information to support default axis labeling and tick labeling, and default limits""" def __init__(self, majloc=None, minloc=None, majfmt=None, minfmt=None, label=None, default_limits=None): """ majloc and minloc: TickLocators for the major and minor ticks majfmt and minfmt: TickFormatters for the major and minor ticks label: the default axis label default_limits: the default min, max of the axis if no data is present If any of the above are None, the axis will simply use the default """ self.majloc = majloc self.minloc = minloc self.majfmt = majfmt self.minfmt = minfmt self.label = label self.default_limits = default_limits class ConversionInterface(object): """ The minimal interface for a converter to take custom instances (or sequences) and convert them to values mpl can use """ @staticmethod def axisinfo(unit, axis): 'return an units.AxisInfo instance for axis with the specified units' return None @staticmethod def default_units(x, axis): 'return the default unit for x or None for the given axis' return None @staticmethod def convert(obj, unit, axis): """ convert obj using unit for the specified axis. If obj is a sequence, return the converted sequence. The output must be a sequence of scalars that can be used by the numpy array layer """ return obj @staticmethod def is_numlike(x): """ The matplotlib datalim, autoscaling, locators etc work with scalars which are the units converted to floats given the current unit. The converter may be passed these floats, or arrays of them, even when units are set. Derived conversion interfaces may opt to pass plain-ol unitless numbers through the conversion interface and this is a helper function for them. 
""" if iterable(x): for thisx in x: return is_numlike(thisx) else: return is_numlike(x) class Registry(dict): """ register types with conversion interface """ def __init__(self): dict.__init__(self) self._cached = {} def get_converter(self, x): 'get the converter interface instance for x, or None' if not len(self): return None # nothing registered # DISABLED idx = id(x) # DISABLED cached = self._cached.get(idx) # DISABLED if cached is not None: return cached converter = None classx = getattr(x, '__class__', None) if classx is not None: converter = self.get(classx) if isinstance(x, np.ndarray) and x.size: xravel = x.ravel() try: # pass the first value of x that is not masked back to # get_converter if not np.all(xravel.mask): # some elements are not masked converter = self.get_converter( xravel[np.argmin(xravel.mask)]) return converter except AttributeError: # not a masked_array # Make sure we don't recurse forever -- it's possible for # ndarray subclasses to continue to return subclasses and # not ever return a non-subclass for a single element. next_item = xravel[0] if (not isinstance(next_item, np.ndarray) or next_item.shape != x.shape): converter = self.get_converter(next_item) return converter if converter is None: try: thisx = safe_first_element(x) except (TypeError, StopIteration): pass else: if classx and classx != getattr(thisx, '__class__', None): converter = self.get_converter(thisx) return converter # DISABLED self._cached[idx] = converter return converter registry = Registry()
gpl-3.0
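Following the DateConverter pattern in the module docstring above, here is a sketch of registering a converter for a hypothetical unit-carrying Quantity class. Quantity and QuantityConverter are invented for the example; only ConversionInterface, AxisInfo and registry come from the module:

import numpy as np
import matplotlib.units as munits

class Quantity(object):
    def __init__(self, value, unit):
        self.value = value
        self.unit = unit

class QuantityConverter(munits.ConversionInterface):
    @staticmethod
    def convert(obj, unit, axis):
        # Strip units: scale everything to metres for plotting.
        scale = {'m': 1.0, 'km': 1000.0}
        if np.iterable(obj):
            return [q.value * scale[q.unit] for q in obj]
        return obj.value * scale[obj.unit]

    @staticmethod
    def axisinfo(unit, axis):
        return munits.AxisInfo(label='length (m)')

    @staticmethod
    def default_units(x, axis):
        return 'm'

# Register the converter so axes accept Quantity instances directly.
munits.registry[Quantity] = QuantityConverter()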
balazssimon/ml-playground
udemy/lazyprogrammer/reinforcement-learning-python/approx_mc_prediction.py
1
2661
import numpy as np import matplotlib.pyplot as plt from grid_world import standard_grid, negative_grid from iterative_policy_evaluation import print_values, print_policy # NOTE: this is only policy evaluation, not optimization # we'll try to obtain the same result as our other MC script from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS LEARNING_RATE = 0.001 if __name__ == '__main__': # use the standard grid again (0 for every step) so that we can compare # to iterative policy evaluation grid = standard_grid() # print rewards print("rewards:") print_values(grid.rewards, grid) # state -> action # found by policy_iteration_random on standard_grid # MC method won't get exactly this, but should be close # values: # --------------------------- # 0.43| 0.56| 0.72| 0.00| # --------------------------- # 0.33| 0.00| 0.21| 0.00| # --------------------------- # 0.25| 0.18| 0.11| -0.17| # policy: # --------------------------- # R | R | R | | # --------------------------- # U | | U | | # --------------------------- # U | L | U | L | policy = { (2, 0): 'U', (1, 0): 'U', (0, 0): 'R', (0, 1): 'R', (0, 2): 'R', (1, 2): 'U', (2, 1): 'L', (2, 2): 'U', (2, 3): 'L', } # initialize theta # our model is V_hat = theta.dot(x) # where x = [row, col, row*col, 1] - 1 for bias term theta = np.random.randn(4) / 2 def s2x(s): return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1]) # repeat until convergence deltas = [] t = 1.0 for it in range(20000): if it % 100 == 0: t += 0.01 alpha = LEARNING_RATE/t # generate an episode using pi biggest_change = 0 states_and_returns = play_game(grid, policy) seen_states = set() for s, G in states_and_returns: # check if we have already seen s # called "first-visit" MC policy evaluation if s not in seen_states: old_theta = theta.copy() x = s2x(s) V_hat = theta.dot(x) # grad(V_hat) wrt theta = x theta += alpha*(G - V_hat)*x biggest_change = max(biggest_change, np.abs(old_theta - theta).sum()) seen_states.add(s) deltas.append(biggest_change) plt.plot(deltas) plt.show() # obtain predicted values V = {} states = grid.all_states() for s in states: if s in grid.actions: V[s] = theta.dot(s2x(s)) else: # terminal state or state we can't otherwise get to V[s] = 0 print("values:") print_values(V, grid) print("policy:") print_policy(policy, grid)
apache-2.0
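The core of the script above is the semi-gradient Monte Carlo update theta += alpha * (G - theta.dot(x)) * x for a linear value model. A minimal, self-contained sketch of just that update; the (state, return) episode below is invented, whereas the real script obtains it from play_game() on the grid world:

import numpy as np

def s2x(s):
    # same feature map as in the script: [row, col, row*col, 1], roughly centered
    return np.array([s[0] - 1, s[1] - 1.5, s[0] * s[1] - 3, 1])

theta = np.random.randn(4) / 2
alpha = 0.001

# invented (state, return) pairs standing in for play_game(grid, policy)
episode = [((2, 0), 0.25), ((1, 0), 0.33), ((0, 0), 0.43)]

for s, G in episode:
    x = s2x(s)
    V_hat = theta.dot(x)
    theta += alpha * (G - V_hat) * x   # grad of V_hat w.r.t. theta is x

print(theta)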
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/pandas/io/json/table_schema.py
12
5184
""" Table Schema builders http://specs.frictionlessdata.io/json-table-schema/ """ from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_categorical_dtype, is_period_dtype, is_string_dtype ) def as_json_table_type(x): """ Convert a NumPy / pandas type to its corresponding json_table. Parameters ---------- x : array or dtype Returns ------- t : str the Table Schema data types Notes ----- This table shows the relationship between NumPy / pandas dtypes, and Table Schema dtypes. ============== ================= Pandas type Table Schema type ============== ================= int64 integer float64 number bool boolean datetime64[ns] datetime timedelta64[ns] duration object str categorical any =============== ================= """ if is_integer_dtype(x): return 'integer' elif is_bool_dtype(x): return 'boolean' elif is_numeric_dtype(x): return 'number' elif (is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or is_period_dtype(x)): return 'datetime' elif is_timedelta64_dtype(x): return 'duration' elif is_categorical_dtype(x): return 'any' elif is_string_dtype(x): return 'string' else: return 'any' def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" if all(name is not None for name in data.index.names): return data data = data.copy() if data.index.nlevels > 1: names = [name if name is not None else 'level_{}'.format(i) for i, name in enumerate(data.index.names)] data.index.names = names else: data.index.name = data.index.name or 'index' return data def make_field(arr, dtype=None): dtype = dtype or arr.dtype if arr.name is None: name = 'values' else: name = arr.name field = {'name': name, 'type': as_json_table_type(dtype)} if is_categorical_dtype(arr): if hasattr(arr, 'categories'): cats = arr.categories ordered = arr.ordered else: cats = arr.cat.categories ordered = arr.cat.ordered field['constraints'] = {"enum": list(cats)} field['ordered'] = ordered elif is_period_dtype(arr): field['freq'] = arr.freqstr elif is_datetime64tz_dtype(arr): if hasattr(arr, 'dt'): field['tz'] = arr.dt.tz.zone else: field['tz'] = arr.tz.zone return field def build_table_schema(data, index=True, primary_key=None, version=True): """ Create a Table schema from ``data``. Parameters ---------- data : Series, DataFrame index : bool, default True Whether to include ``data.index`` in the schema. primary_key : bool or None, default True column names to designate as the primary key. The default `None` will set `'primaryKey'` to the index level or levels if the index is unique. version : bool, default True Whether to include a field `pandas_version` with the version of pandas that generated the schema. Returns ------- schema : dict Examples -------- >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': [{'name': 'idx', 'type': 'integer'}, {'name': 'A', 'type': 'integer'}, {'name': 'B', 'type': 'string'}, {'name': 'C', 'type': 'datetime'}], 'pandas_version': '0.20.0', 'primaryKey': ['idx']} Notes ----- See `_as_json_table_type` for conversion types. Timedeltas as converted to ISO8601 duration format with 9 decimal places after the secnods field for nanosecond precision. Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. 
The `ordered` attribute is included in an `ordered` field. """ if index is True: data = set_default_names(data) schema = {} fields = [] if index: if data.index.nlevels > 1: for level in data.index.levels: fields.append(make_field(level)) else: fields.append(make_field(data.index)) if data.ndim > 1: for column, s in data.iteritems(): fields.append(make_field(s)) else: fields.append(make_field(data)) schema['fields'] = fields if index and data.index.is_unique and primary_key is None: if data.index.nlevels == 1: schema['primaryKey'] = [data.index.name] else: schema['primaryKey'] = data.index.names elif primary_key is not None: schema['primaryKey'] = primary_key if version: schema['pandas_version'] = '0.20.0' return schema
mit
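A rough usage sketch for build_table_schema, extending the docstring example with a categorical column so the enum constraint path in make_field is exercised. The import path matches this row's file, but it differs in newer pandas, where the schema is normally reached through DataFrame.to_json(orient='table'):

import pandas as pd
from pandas.io.json.table_schema import build_table_schema   # path as in the row above

df = pd.DataFrame(
    {'A': [1, 2, 3],
     'B': ['a', 'b', 'c'],
     'C': pd.date_range('2016-01-01', freq='d', periods=3),
     'D': pd.Categorical(['x', 'y', 'x'])},
    index=pd.Index(range(3), name='idx'))

schema = build_table_schema(df)
# 'A' -> integer, 'B' -> string, 'C' -> datetime,
# 'D' -> any with constraints={'enum': ['x', 'y']} and ordered=False,
# and primaryKey == ['idx'] because the index is unique
print(schema)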
olologin/scikit-learn
examples/svm/plot_iris.py
225
3252
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
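The meshgrid -> predict -> reshape -> contourf recipe in the example above works for any fitted classifier on two features. A minimal sketch with the recipe factored into a helper (the particular estimator chosen here is arbitrary, not part of the original example):

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

def plot_decision_surface(clf, X, y, h=0.02):
    # build a grid over the two features
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # predict on every grid point, then reshape back to the grid
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target
plot_decision_surface(svm.SVC(kernel='rbf', gamma=0.7, C=1.0).fit(X, y), X, y)
plt.show()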
afruizc/microsoft_malware_challenge
src/models/first_model/get_conf_matrix.py
2
2842
""" This is a script that is used to generate a confussion matrix for a classification method. This uses 10-k cross_validation with in order to provide sensible resutls and not overfit. """ __author__ = "Andres Ruiz" __license__ = "Apache" __email__ = "afruizc __thingy__ cs unm edu" import numpy as np from sklearn.cross_validation import KFold from sklearn.metrics import confusion_matrix, accuracy_score, log_loss import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import svm_bow def plot_confusion_matrix(cm, title='Confusion matrix', normalized=True, cmap=plt.cm.Oranges, save_file=""): """ Displays the confussion matrix indicated by `cm`. If argument `normalized` is Ture, then the matrix is normalized. Optionally the image can be saved to a file Arguments: ---------- `cm`: The confusion matrix to be displayed. `title`: The title for the window. `normalized`: If True, normalizes the matrix before showing it. `cmap`: Colormap to use. `save_file`: If string different than empty, the resulting image is stored in such file. """ if normalized: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') if save_file: plt.savefig(save_file) def get_indices(data, indices): result = [] for i in indices: result.append(data[i]) return result def main(): e = svm_bow.Executor() e.load_data() e.config_model() fold = KFold(len(e.train['data']), n_folds=10) conf_mat_avg = np.zeros((9, 9)) c = 0 for train, test in fold: X_train = get_indices(e.train['data'], train) X_test = get_indices(e.train['data'], test) y_train = get_indices(e.train['target'], train) y_test = get_indices(e.train['target'], test) c += 1 print("Fitting run {}.".format(c)) model = e.param_tunning.fit(X_train, y_train) print("Predicting...") y_pred = model.predict(X_test) y_pred_prob = model.predict_proba(X_test) conf_matrix = confusion_matrix(y_test, y_pred) accruacy = accuracy_score(y_test, y_pred) loss = log_loss(y_test, y_pred_prob) plot_confusion_matrix(conf_matrix, save_file='fold_{}.png'.format(c)) np.savetxt('conf_matrix_fold{}'.format(c), conf_matrix) print("Fold %d. Accuracy: %lf Loss: %lf" % (c, accruacy, loss)) conf_mat_avg += conf_matrix np.savetxt('conf_matrix.txt', conf_mat_avg) conf_mat_avg /= 10.0 plot_confusion_matrix(conf_mat_avg, save_file='final_cm.png') if __name__ == '__main__': main()
apache-2.0
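The same per-fold confusion-matrix accumulation, sketched with the newer sklearn.model_selection API and a stand-in dataset and classifier, since the original depends on the project-specific svm_bow.Executor and the deprecated sklearn.cross_validation module:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

X, y = load_iris(return_X_y=True)
n_classes = len(np.unique(y))
conf_mat_avg = np.zeros((n_classes, n_classes))

kf = KFold(n_splits=10, shuffle=True, random_state=0)
for train_idx, test_idx in kf.split(X):
    model = LogisticRegression(max_iter=1000).fit(X[train_idx], y[train_idx])
    y_pred = model.predict(X[test_idx])
    # accumulate the per-fold confusion matrix, fixing the label order
    conf_mat_avg += confusion_matrix(y[test_idx], y_pred, labels=np.arange(n_classes))

conf_mat_avg /= kf.get_n_splits()
print(conf_mat_avg)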
joelfrederico/SciSalt
scisalt/qt/mplwidget.py
1
13557
from PyQt4 import QtGui from PyQt4 import QtCore from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar import matplotlib as _mpl import numpy as _np from .Rectangle import Rectangle import pdb import traceback import logging loggerlevel = logging.DEBUG logger = logging.getLogger(__name__) try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Slider_and_Text(QtGui.QWidget): valueChanged = QtCore.pyqtSignal(int) sliderReleased = QtCore.pyqtSignal(int) def __init__(self, parent=None): QtGui.QWidget.__init__(self) self.setMaximumHeight(40) # Enable tracking by default self._tracking = True self.hLayout = QtGui.QHBoxLayout() self.slider = QtGui.QSlider() self.leftbutton = QtGui.QPushButton() self.leftbutton.setText("<") sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth()) # self.leftbutton.setSizePolicy(sizePolicy) self.leftbutton.clicked.connect(self._subone) self.rightbutton = QtGui.QPushButton() self.rightbutton.setText(">") sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth()) # self.rightbutton.setSizePolicy(sizePolicy) self.rightbutton.clicked.connect(self._addone) self.v = QtGui.QIntValidator() self.box = QtGui.QLineEdit() self.box.setValidator(self.v) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth()) # self.box.setSizePolicy(sizePolicy) self.hLayout.addWidget(self.leftbutton) self.hLayout.addWidget(self.slider) self.hLayout.addWidget(self.box) self.hLayout.addWidget(self.rightbutton) self.setLayout(self.hLayout) self.slider.valueChanged.connect(self._sliderChanged) self.box.editingFinished.connect(self._textChanged) self.setOrientation(QtCore.Qt.Horizontal) # Connect release so tracking works as expected self.slider.sliderReleased.connect(self._sliderReleased) def _addone(self): self.value = self.value + 1 self.valueChanged.emit(self.value) def _subone(self): self.value = self.value - 1 self.valueChanged.emit(self.value) def _sliderReleased(self): print('Released') self.sliderReleased.emit(self.slider.value) def setTracking(self, val): print('Tracking set to {}'.format(val)) self._tracking = val def setMaximum(self, val): self.slider.setMaximum(val) self.v.setRange(self.slider.minimum(), self.slider.maximum()) self.box.setValidator(self.v) def setMinimum(self, val): self.slider.setMinimum(val) self.v.setRange(self.slider.minimum(), self.slider.maximum()) self.box.setValidator(self.v) def _sliderChanged(self, val): self.box.setText(str(val)) if self._tracking: try: self.slider.sliderReleased.disconnect() except: pass self.valueChanged.emit(val) else: try: self.slider.sliderReleased.disconnect() except: pass 
self.slider.sliderReleased.connect(self._sliderChanged_notracking) def _sliderChanged_notracking(self): val = self.slider.value() # print('Value to be emitted is {}'.format(val)) self.valueChanged.emit(val) def _textChanged(self): val = self.box.text() self.slider.setValue(int(val)) self._sliderChanged_notracking() def setOrientation(self, *args, **kwargs): self.slider.setOrientation(*args, **kwargs) def _getValue(self): return self.slider.value() def _setValue(self, val): self.slider.setValue(val) self.box.setText(str(val)) value = property(_getValue, _setValue) def setValue(self, val): self.slider.setValue(val) self.box.setText(str(val)) # self.valueChanged.emit(val) class Mpl_Plot(_FigureCanvas): def __init__(self, parent=None): # Initialize things self.fig = _mpl.figure.Figure() _FigureCanvas.__init__(self, self.fig) _FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) _FigureCanvas.updateGeometry(self) # Create axes self.ax = self.fig.add_subplot(111) def plot(self, *args, **kwargs): self.ax.clear() self.ax.plot(*args, **kwargs) self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y') self.ax.figure.canvas.draw() class Mpl_Image(QtGui.QWidget): # Signal for when the rectangle is changed rectChanged = QtCore.pyqtSignal(Rectangle) def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None): # Initialize things QtGui.QWidget.__init__(self) self.rectbool = rectbool self._clim_min = 0 self._clim_max = 3600 self._pressed = False # Add a vertical layout self.vLayout = QtGui.QVBoxLayout() # Add a figure self.fig = _mpl.figure.Figure() # Add a canvas containing the fig self.canvas = _FigureCanvas(self.fig) _FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) _FigureCanvas.updateGeometry(self.canvas) # Setup the layout if toolbarbool: self.toolbar = _NavigationToolbar(self.canvas, self) self.toolbar.setMaximumHeight(20) self.vLayout.addWidget(self.toolbar) self.vLayout.addWidget(self.canvas) self.setLayout(self.vLayout) # Create axes self.ax = self.fig.add_subplot(111) # Include rectangle functionality if rectbool: self.fig.canvas.mpl_connect('button_press_event', self.on_press) self.fig.canvas.mpl_connect('button_release_event', self.on_release) self.Rectangle = Rectangle( x = -10 , y = 0 , width = 0 , height = 3 , axes = self.ax ) # Add image self.image = image def _get_img(self): return self._image def _set_img(self, image): self.ax.clear() self._image = image if image is not None: self._imgplot = self.ax.imshow(image, interpolation='none') if self.rectbool: self.ax.add_patch(self.Rectangle.get_rect()) # imagemax = _np.max(_np.max(image)) self.set_clim(self._clim_min, self._clim_max) image = property(_get_img, _set_img) def set_clim(self, clim_min, clim_max): if self.image is not None: self._clim_min = clim_min self._clim_max = clim_max self._imgplot.set_clim(clim_min, clim_max) self.ax.figure.canvas.draw() def on_press(self, event): if self.toolbar._active is None: self._pressed = True self.x0 = event.xdata self.y0 = event.ydata logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0)) def on_release(self, event): if self._pressed: self._pressed = False print('release') self.x1 = event.xdata self.y1 = event.ydata width = self.x1 - self.x0 height = self.y1 - self.y0 logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format( self.x0 , self.y0 , self.x1 , self.y1 , width , height ) ) 
self.Rectangle.set_xy((self.x0, self.y0)) self.Rectangle.set_width(width) self.Rectangle.set_height(height) self.ax.figure.canvas.draw() self.rectChanged.emit(self.Rectangle) # print(self.rect) def zoom_rect(self, border=None, border_px=None): # ====================================== # Get x coordinates # ====================================== x0 = self.Rectangle.get_x() width = self.Rectangle.get_width() x1 = x0+width # ====================================== # Get y coordinates # ====================================== y0 = self.Rectangle.get_y() height = self.Rectangle.get_height() y1 = y0+height # ====================================== # Validate borders # ====================================== if (border_px is None) and (border is not None): xborder = border[0]*width yborder = border[1]*height elif (border_px is not None) and (border is None): xborder = border_px[0] yborder = border_px[1] elif (border_px is None) and (border is None): raise IOError('No border info specified!') elif (border_px is not None) and (border is not None): raise IOError('Too much border info specified, both border_px and border!') else: raise IOError('End of the line!') # ====================================== # Add borders # ====================================== x0 = x0 - xborder x1 = x1 + xborder y0 = y0 - yborder y1 = y1 + yborder # ====================================== # Validate coordinates to prevent # unPythonic crash # ====================================== if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])): print('X issue') print('Requested: x=({}, {})'.format(x0, x1)) x0 = 0 x1 = self.image.shape[1] if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])): print('y issue') print('Requested: y=({}, {})'.format(y0, y1)) y0 = 0 y1 = self.image.shape[0] # ====================================== # Set viewable area # ====================================== self.ax.set_xlim(x0, x1) self.ax.set_ylim(y0, y1) # ====================================== # Redraw canvas to show updates # ====================================== self.ax.figure.canvas.draw() class Mpl_Image_Plus_Slider(QtGui.QWidget): # def __init__(self, parent=None, **kwargs): def __init__(self, parent=None, **kwargs): # Initialize self as a widget QtGui.QWidget.__init__(self, parent) # Add a vertical layout with parent self self.vLayout = QtGui.QVBoxLayout(self) self.vLayout.setObjectName(_fromUtf8("vLayout")) # Add an Mpl_Image widget to vLayout, # save it to self._img # Pass arguments through to Mpl_Image. 
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs) self._img.setObjectName(_fromUtf8("_img")) self.vLayout.addWidget(self._img) # Add a slider to vLayout, # save it to self.max_slider # self.max_slider = QtGui.QSlider(self) self.max_slider = Slider_and_Text(self) self.max_slider.setObjectName(_fromUtf8("max_slider")) self.max_slider.setOrientation(QtCore.Qt.Horizontal) self.vLayout.addWidget(self.max_slider) # Setup slider to work with _img's clims self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val)) def _get_image(self): return self._img.image def _set_image(self, image): self._img.image = image maximage = _np.max(_np.max(image)) self.max_slider.setMaximum(maximage) image = property(_get_image, _set_image) def _get_ax(self): return self._img.ax ax = property(_get_ax) def _get_Rectangle(self): return self._img.Rectangle # def _set_rect(self, rect): # self._img.rect(rect) Rectangle = property(_get_Rectangle) def zoom_rect(self, border=None, border_px=None): self._img.zoom_rect(border, border_px) def set_clim(self, *args, **kwargs): self._img.set_clim(*args, **kwargs) def setSliderValue(self, val): self.max_slider.setValue(val)
mit
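A rough usage sketch for embedding Mpl_Image_Plus_Slider in a PyQt4 application. It assumes the module is importable as scisalt.qt.mplwidget (the path of this row) and uses random image data; setting the slider value drives set_clim through the widget's valueChanged connection:

import sys
import numpy as np
from PyQt4 import QtGui
from scisalt.qt.mplwidget import Mpl_Image_Plus_Slider

app = QtGui.QApplication(sys.argv)
win = QtGui.QMainWindow()

viewer = Mpl_Image_Plus_Slider()
viewer.image = np.random.randint(0, 3600, size=(480, 640))   # sets the plot and the slider's maximum
viewer.max_slider.setValue(1800)                              # clip the color scale at 1800 counts

win.setCentralWidget(viewer)
win.show()
sys.exit(app.exec_())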
wilsonkichoi/zipline
zipline/data/data_portal.py
1
64491
# # Copyright 2016 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from operator import mul import bcolz from logbook import Logger import numpy as np import pandas as pd from pandas.tslib import normalize_date from six import iteritems from six.moves import reduce from zipline.assets import Asset, Future, Equity from zipline.data.us_equity_pricing import NoDataOnDate from zipline.data.us_equity_loader import ( USEquityDailyHistoryLoader, USEquityMinuteHistoryLoader, ) from zipline.utils import tradingcalendar from zipline.utils.math_utils import ( nansum, nanmean, nanstd ) from zipline.utils.memoize import remember_last, weak_lru_cache from zipline.errors import ( NoTradeDataAvailableTooEarly, NoTradeDataAvailableTooLate, HistoryWindowStartsBeforeData, ) log = Logger('DataPortal') BASE_FIELDS = frozenset([ "open", "high", "low", "close", "volume", "price", "last_traded" ]) OHLCV_FIELDS = frozenset([ "open", "high", "low", "close", "volume" ]) OHLCVP_FIELDS = frozenset([ "open", "high", "low", "close", "volume", "price" ]) HISTORY_FREQUENCIES = set(["1m", "1d"]) class DailyHistoryAggregator(object): """ Converts minute pricing data into a daily summary, to be used for the last slot in a call to history with a frequency of `1d`. This summary is the same as a daily bar rollup of minute data, with the distinction that the summary is truncated to the `dt` requested. i.e. the aggregation slides forward during a the course of simulation day. Provides aggregation for `open`, `high`, `low`, `close`, and `volume`. The aggregation rules for each price type is documented in their respective """ def __init__(self, market_opens, minute_reader): self._market_opens = market_opens self._minute_reader = minute_reader # The caches are structured as (date, market_open, entries), where # entries is a dict of asset -> (last_visited_dt, value) # # Whenever an aggregation method determines the current value, # the entry for the respective asset should be overwritten with a new # entry for the current dt.value (int) and aggregation value. # # When the requested dt's date is different from date the cache is # flushed, so that the cache entries do not grow unbounded. # # Example cache: # cache = (date(2016, 3, 17), # pd.Timestamp('2016-03-17 13:31', tz='UTC'), # { # 1: (1458221460000000000, np.nan), # 2: (1458221460000000000, 42.0), # }) self._caches = { 'open': None, 'high': None, 'low': None, 'close': None, 'volume': None } # The int value is used for deltas to avoid extra computation from # creating new Timestamps. 
self._one_min = pd.Timedelta('1 min').value def _prelude(self, dt, field): date = dt.date() dt_value = dt.value cache = self._caches[field] if cache is None or cache[0] != date: market_open = self._market_opens.loc[date] cache = self._caches[field] = (dt.date(), market_open, {}) _, market_open, entries = cache if dt != market_open: prev_dt = dt_value - self._one_min else: prev_dt = None return market_open, prev_dt, dt_value, entries def opens(self, assets, dt): """ The open field's aggregation returns the first value that occurs for the day, if there has been no data on or before the `dt` the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open') opens = [] normalized_date = normalize_date(dt) for asset in assets: if not asset._is_alive(normalized_date, True): opens.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'open') entries[asset] = (dt_value, val) opens.append(val) continue else: try: last_visited_dt, first_open = entries[asset] if last_visited_dt == dt_value: opens.append(first_open) continue elif not pd.isnull(first_open): opens.append(first_open) entries[asset] = (dt_value, first_open) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['open'], after_last, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['open'], market_open, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue return np.array(opens) def highs(self, assets, dt): """ The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt` the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. 
""" market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high') highs = [] normalized_date = normalize_date(dt) for asset in assets: if not asset._is_alive(normalized_date, True): highs.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'high') entries[asset] = (dt_value, val) highs.append(val) continue else: try: last_visited_dt, last_max = entries[asset] if last_visited_dt == dt_value: highs.append(last_max) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'high') if pd.isnull(curr_val): val = last_max elif pd.isnull(last_max): val = curr_val else: val = max(last_max, curr_val) entries[asset] = (dt_value, val) highs.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['high'], after_last, dt, [asset], )[0].T val = max(last_max, np.nanmax(window)) entries[asset] = (dt_value, val) highs.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['high'], market_open, dt, [asset], )[0].T val = np.nanmax(window) entries[asset] = (dt_value, val) highs.append(val) continue return np.array(highs) def lows(self, assets, dt): """ The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt` the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low') lows = [] normalized_date = normalize_date(dt) for asset in assets: if not asset._is_alive(normalized_date, True): lows.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'low') entries[asset] = (dt_value, val) lows.append(val) continue else: try: last_visited_dt, last_min = entries[asset] if last_visited_dt == dt_value: lows.append(last_min) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'low') val = np.nanmin([last_min, curr_val]) entries[asset] = (dt_value, val) lows.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['low'], after_last, dt, [asset], )[0].T window_min = np.nanmin(window) if pd.isnull(window_min): val = last_min else: val = min(last_min, window_min) entries[asset] = (dt_value, val) lows.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['low'], market_open, dt, [asset], )[0].T val = np.nanmin(window) entries[asset] = (dt_value, val) lows.append(val) continue return np.array(lows) def closes(self, assets, dt): """ The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt` the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. 
""" market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close') closes = [] normalized_dt = normalize_date(dt) for asset in assets: if not asset._is_alive(normalized_dt, True): closes.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'close') entries[asset] = (dt_value, val) closes.append(val) continue else: try: last_visited_dt, last_close = entries[asset] if last_visited_dt == dt_value: closes.append(last_close) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = last_close entries[asset] = (dt_value, val) closes.append(val) continue else: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = self.closes( [asset], pd.Timestamp(prev_dt, tz='UTC'))[0] entries[asset] = (dt_value, val) closes.append(val) continue except KeyError: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = self.closes([asset], pd.Timestamp(prev_dt, tz='UTC'))[0] entries[asset] = (dt_value, val) closes.append(val) continue return np.array(closes) def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] normalized_date = normalize_date(dt) for asset in assets: if not asset._is_alive(normalized_date, True): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes) class DataPortal(object): """Interface to all of the data that a zipline simulation needs. This is used by the simulation runner to answer questions about the data, like getting the prices of assets on a given day or to service history calls. Parameters ---------- env : TradingEnvironment The trading environment for the simulation. This includes the trading calendar and benchmark data. first_trading_day : pd.Timestamp The first trading day for the simulation. equity_daily_reader : BcolzDailyBarReader, optional The daily bar reader for equities. This will be used to service daily data backtests or daily history calls in a minute backetest. If a daily bar reader is not provided but a minute bar reader is, the minutes will be rolled up to serve the daily requests. equity_minute_reader : BcolzMinuteBarReader, optional The minute bar reader for equities. This will be used to service minute data backtests or minute history calls. This can be used to serve daily calls if no daily bar reader is provided. 
future_daily_reader : BcolzDailyBarReader, optional The daily bar ready for futures. This will be used to service daily data backtests or daily history calls in a minute backetest. If a daily bar reader is not provided but a minute bar reader is, the minutes will be rolled up to serve the daily requests. future_minute_reader : BcolzMinuteBarReader, optional The minute bar reader for futures. This will be used to service minute data backtests or minute history calls. This can be used to serve daily calls if no daily bar reader is provided. adjustment_reader : SQLiteAdjustmentWriter, optional The adjustment reader. This is used to apply splits, dividends, and other adjustment data to the raw data from the readers. """ def __init__(self, env, first_trading_day, equity_daily_reader=None, equity_minute_reader=None, future_daily_reader=None, future_minute_reader=None, adjustment_reader=None): self.env = env self.views = {} self._asset_finder = env.asset_finder self._carrays = { 'open': {}, 'high': {}, 'low': {}, 'close': {}, 'volume': {}, 'sid': {}, } self._adjustment_reader = adjustment_reader # caches of sid -> adjustment list self._splits_dict = {} self._mergers_dict = {} self._dividends_dict = {} # Cache of sid -> the first trading day of an asset. self._asset_start_dates = {} self._asset_end_dates = {} # Handle extra sources, like Fetcher. self._augmented_sources_map = {} self._extra_source_df = None self._equity_daily_reader = equity_daily_reader if self._equity_daily_reader is not None: self._equity_history_loader = USEquityDailyHistoryLoader( self.env, self._equity_daily_reader, self._adjustment_reader ) self._equity_minute_reader = equity_minute_reader self._future_daily_reader = future_daily_reader self._future_minute_reader = future_minute_reader self._first_trading_day = first_trading_day if self._equity_minute_reader is not None: self._equity_daily_aggregator = DailyHistoryAggregator( self.env.open_and_closes.market_open, self._equity_minute_reader) self._equity_minute_history_loader = USEquityMinuteHistoryLoader( self.env, self._equity_minute_reader, self._adjustment_reader ) self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \ self._equity_minute_reader._ohlc_inverse def _reindex_extra_source(self, df, source_date_index): return df.reindex(index=source_date_index, method='ffill') def handle_extra_source(self, source_df, sim_params): """ Extra sources always have a sid column. We expand the given data (by forward filling) to the full range of the simulation dates, so that lookup is fast during simulation. """ if source_df is None: return # Normalize all the dates in the df source_df.index = source_df.index.normalize() # source_df's sid column can either consist of assets we know about # (such as sid(24)) or of assets we don't know about (such as # palladium). # # In both cases, we break up the dataframe into individual dfs # that only contain a single asset's information. ie, if source_df # has data for PALLADIUM and GOLD, we split source_df into two # dataframes, one for each. (same applies if source_df has data for # AAPL and IBM). # # We then take each child df and reindex it to the simulation's date # range by forward-filling missing values. this makes reads simpler. # # Finally, we store the data. For each column, we store a mapping in # self.augmented_sources_map from the column to a dictionary of # asset -> df. In other words, # self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df # holding that data. 
source_date_index = self.env.days_in_range( start=sim_params.period_start, end=sim_params.period_end ) # Break the source_df up into one dataframe per sid. This lets # us (more easily) calculate accurate start/end dates for each sid, # de-dup data, and expand the data to fit the backtest start/end date. grouped_by_sid = source_df.groupby(["sid"]) group_names = grouped_by_sid.groups.keys() group_dict = {} for group_name in group_names: group_dict[group_name] = grouped_by_sid.get_group(group_name) # This will be the dataframe which we query to get fetcher assets at # any given time. Get's overwritten every time there's a new fetcher # call extra_source_df = pd.DataFrame() for identifier, df in iteritems(group_dict): # Before reindexing, save the earliest and latest dates earliest_date = df.index[0] latest_date = df.index[-1] # Since we know this df only contains a single sid, we can safely # de-dupe by the index (dt). If minute granularity, will take the # last data point on any given day df = df.groupby(level=0).last() # Reindex the dataframe based on the backtest start/end date. # This makes reads easier during the backtest. df = self._reindex_extra_source(df, source_date_index) if not isinstance(identifier, Asset): # for fake assets we need to store a start/end date self._asset_start_dates[identifier] = earliest_date self._asset_end_dates[identifier] = latest_date for col_name in df.columns.difference(['sid']): if col_name not in self._augmented_sources_map: self._augmented_sources_map[col_name] = {} self._augmented_sources_map[col_name][identifier] = df # Append to extra_source_df the reindexed dataframe for the single # sid extra_source_df = extra_source_df.append(df) self._extra_source_df = extra_source_df def _open_minute_file(self, field, asset): sid_str = str(int(asset)) try: carray = self._carrays[field][sid_str] except KeyError: carray = self._carrays[field][sid_str] = \ self._get_ctable(asset)[field] return carray def _get_ctable(self, asset): sid = int(asset) if isinstance(asset, Future): if self._future_minute_reader.sid_path_func is not None: path = self._future_minute_reader.sid_path_func( self._future_minute_reader.rootdir, sid ) else: path = "{0}/{1}.bcolz".format( self._future_minute_reader.rootdir, sid) elif isinstance(asset, Equity): if self._equity_minute_reader.sid_path_func is not None: path = self._equity_minute_reader.sid_path_func( self._equity_minute_reader.rootdir, sid ) else: path = "{0}/{1}.bcolz".format( self._equity_minute_reader.rootdir, sid) else: # TODO: Figure out if assets should be allowed if neither, and # why this code path is being hit. if self._equity_minute_reader.sid_path_func is not None: path = self._equity_minute_reader.sid_path_func( self._equity_minute_reader.rootdir, sid ) else: path = "{0}/{1}.bcolz".format( self._equity_minute_reader.rootdir, sid) return bcolz.open(path, mode='r') def get_last_traded_dt(self, asset, dt, data_frequency): """ Given an asset and dt, returns the last traded dt from the viewpoint of the given dt. If there is a trade on the dt, the answer is dt provided. """ if data_frequency == 'minute': return self._equity_minute_reader.get_last_traded_dt(asset, dt) elif data_frequency == 'daily': return self._equity_daily_reader.get_last_traded_dt(asset, dt) @staticmethod def _is_extra_source(asset, field, map): """ Internal method that determines if this asset/field combination represents a fetcher value or a regular OHLCVP lookup. 
""" # If we have an extra source with a column called "price", only look # at it if it's on something like palladium and not AAPL (since our # own price data always wins when dealing with assets). return not (field in BASE_FIELDS and isinstance(asset, Asset)) def _get_fetcher_value(self, asset, field, dt): day = normalize_date(dt) try: return \ self._augmented_sources_map[field][asset].loc[day, field] except KeyError: return np.NaN def get_spot_value(self, asset, field, dt, data_frequency): """ Public API method that returns a scalar value representing the value of the desired asset's field at either the given dt. Parameters ---------- asset : Asset The asset whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The spot value of ``field`` for ``asset`` The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. """ if self._is_extra_source(asset, field, self._augmented_sources_map): return self._get_fetcher_value(asset, field, dt) if field not in BASE_FIELDS: raise KeyError("Invalid column: " + str(field)) if dt < asset.start_date or \ (data_frequency == "daily" and dt > asset.end_date) or \ (data_frequency == "minute" and normalize_date(dt) > asset.end_date): if field == "volume": return 0 elif field != "last_traded": return np.NaN if data_frequency == "daily": day_to_use = dt day_to_use = normalize_date(day_to_use) return self._get_daily_data(asset, field, day_to_use) else: if isinstance(asset, Future): return self._get_minute_spot_value_future( asset, field, dt) else: if field == "last_traded": return self._equity_minute_reader.get_last_traded_dt( asset, dt ) elif field == "price": return self._get_minute_spot_value(asset, "close", dt, True) else: return self._get_minute_spot_value(asset, field, dt) def get_adjustments(self, assets, field, dt, perspective_dt): """ Returns a list of adjustments between the dt and perspective_dt for the given field and list of assets Parameters ---------- assets : list of type Asset, or Asset The asset, or assets whose adjustments are desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- adjustments : list[Adjustment] The adjustments to that field. 
""" if isinstance(assets, Asset): assets = [assets] adjustment_ratios_per_asset = [] split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x for asset in assets: adjustments_for_asset = [] split_adjustments = self._get_adjustment_list( asset, self._splits_dict, "SPLITS" ) for adj_dt, adj in split_adjustments: if dt <= adj_dt <= perspective_dt: adjustments_for_asset.append(split_adj_factor(adj)) elif adj_dt > perspective_dt: break if field != 'volume': merger_adjustments = self._get_adjustment_list( asset, self._mergers_dict, "MERGERS" ) for adj_dt, adj in merger_adjustments: if dt <= adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break dividend_adjustments = self._get_adjustment_list( asset, self._dividends_dict, "DIVIDENDS", ) for adj_dt, adj in dividend_adjustments: if dt <= adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break ratio = reduce(mul, adjustments_for_asset, 1.0) adjustment_ratios_per_asset.append(ratio) return adjustment_ratios_per_asset def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None): """ Returns a scalar value representing the value of the desired asset's field at the given dt with adjustments applied. Parameters ---------- asset : Asset The asset whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The value of the given ``field`` for ``asset`` at ``dt`` with any adjustments known by ``perspective_dt`` applied. The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. """ if spot_value is None: # if this a fetcher field, we want to use perspective_dt (not dt) # because we want the new value as of midnight (fetcher only works # on a daily basis, all timestamps are on midnight) if self._is_extra_source(asset, field, self._augmented_sources_map): spot_value = self.get_spot_value(asset, field, perspective_dt, data_frequency) else: spot_value = self.get_spot_value(asset, field, dt, data_frequency) if isinstance(asset, Equity): ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0] spot_value *= ratio return spot_value def _get_minute_spot_value_future(self, asset, column, dt): # Futures bcolz files have 1440 bars per day (24 hours), 7 days a week. # The file attributes contain the "start_dt" and "last_dt" fields, # which represent the time period for this bcolz file. # The start_dt is midnight of the first day that this future started # trading. 
# figure out the # of minutes between dt and this asset's start_dt start_date = self._get_asset_start_date(asset) minute_offset = int((dt - start_date).total_seconds() / 60) if minute_offset < 0: # asking for a date that is before the asset's start date, no dice return 0.0 # then just index into the bcolz carray at that offset carray = self._open_minute_file(column, asset) result = carray[minute_offset] # if there's missing data, go backwards until we run out of file while result == 0 and minute_offset > 0: minute_offset -= 1 result = carray[minute_offset] if column != 'volume': # FIXME switch to a futures reader return result * 0.001 else: return result def _get_minute_spot_value(self, asset, column, dt, ffill=False): result = self._equity_minute_reader.get_value( asset.sid, dt, column ) if column == "volume": if result == 0: return 0 elif not ffill or not np.isnan(result): # if we're not forward filling, or we found a result, return it return result # we are looking for price, and didn't find one. have to go hunting. last_traded_dt = \ self._equity_minute_reader.get_last_traded_dt(asset, dt) if last_traded_dt is pd.NaT: # no last traded dt, bail return np.nan # get the value as of the last traded dt result = self._equity_minute_reader.get_value( asset.sid, last_traded_dt, column ) if np.isnan(result): return np.nan if dt == last_traded_dt or dt.date() == last_traded_dt.date(): return result # the value we found came from a different day, so we have to adjust # the data if there are any adjustments on that day barrier return self.get_adjusted_value( asset, column, last_traded_dt, dt, "minute", spot_value=result ) def _get_daily_data(self, asset, column, dt): if column == "last_traded": last_traded_dt = \ self._equity_daily_reader.get_last_traded_dt(asset, dt) if pd.isnull(last_traded_dt): return pd.NaT else: return last_traded_dt elif column in OHLCV_FIELDS: # don't forward fill try: val = self._equity_daily_reader.spot_price(asset, dt, column) if val == -1: if column == "volume": return 0 else: return np.nan else: return val except NoDataOnDate: return np.nan elif column == "price": found_dt = dt while True: try: value = self._equity_daily_reader.spot_price( asset, found_dt, "close" ) if value != -1: if dt == found_dt: return value else: # adjust if needed return self.get_adjusted_value( asset, column, found_dt, dt, "minute", spot_value=value ) else: found_dt -= tradingcalendar.trading_day except NoDataOnDate: return np.nan @remember_last def _get_days_for_window(self, end_date, bar_count): tds = self.env.trading_days end_loc = self.env.trading_days.get_loc(end_date) start_loc = end_loc - bar_count + 1 if start_loc < 0: raise HistoryWindowStartsBeforeData( first_trading_day=self.env.first_trading_day.date(), bar_count=bar_count, suggested_start_day=tds[bar_count].date(), ) return tds[start_loc:end_loc + 1] def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. 
""" days_for_window = self._get_days_for_window(end_dt.date(), bar_count) if len(assets) == 0: return pd.DataFrame(None, index=days_for_window, columns=None) future_data = [] eq_assets = [] for asset in assets: if isinstance(asset, Future): future_data.append(self._get_history_daily_window_future( asset, days_for_window, end_dt, field_to_use )) else: eq_assets.append(asset) eq_data = self._get_history_daily_window_equities( eq_assets, days_for_window, end_dt, field_to_use ) if future_data: # TODO: This case appears to be uncovered by testing. data = np.concatenate(eq_data, np.array(future_data).T) else: data = eq_data return pd.DataFrame( data, index=days_for_window, columns=assets ) def _get_history_daily_window_future(self, asset, days_for_window, end_dt, column): # Since we don't have daily bcolz files for futures (yet), use minute # bars to calculate the daily values. data = [] data_groups = [] # get all the minutes for the days NOT including today for day in days_for_window[:-1]: minutes = self.env.market_minutes_for_day(day) values_for_day = np.zeros(len(minutes), dtype=np.float64) for idx, minute in enumerate(minutes): minute_val = self._get_minute_spot_value_future( asset, column, minute ) values_for_day[idx] = minute_val data_groups.append(values_for_day) # get the minutes for today last_day_minutes = pd.date_range( start=self.env.get_open_and_close(end_dt)[0], end=end_dt, freq="T" ) values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64) for idx, minute in enumerate(last_day_minutes): minute_val = self._get_minute_spot_value_future( asset, column, minute ) values_for_last_day[idx] = minute_val data_groups.append(values_for_last_day) for group in data_groups: if len(group) == 0: continue if column == 'volume': data.append(np.sum(group)) elif column == 'open': data.append(group[0]) elif column == 'close': data.append(group[-1]) elif column == 'high': data.append(np.amax(group)) elif column == 'low': data.append(np.amin(group)) return data def _get_history_daily_window_equities( self, assets, days_for_window, end_dt, field_to_use): ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0 if ends_at_midnight: # two cases where we use daily data for the whole range: # 1) the history window ends at midnight utc. # 2) the last desired day of the window is after the # last trading day, use daily data for the whole range. return self._get_daily_window_for_sids( assets, field_to_use, days_for_window, extra_slot=False ) else: # minute mode, requesting '1d' daily_data = self._get_daily_window_for_sids( assets, field_to_use, days_for_window[0:-1] ) if field_to_use == 'open': minute_value = self._equity_daily_aggregator.opens( assets, end_dt) elif field_to_use == 'high': minute_value = self._equity_daily_aggregator.highs( assets, end_dt) elif field_to_use == 'low': minute_value = self._equity_daily_aggregator.lows( assets, end_dt) elif field_to_use == 'close': minute_value = self._equity_daily_aggregator.closes( assets, end_dt) elif field_to_use == 'volume': minute_value = self._equity_daily_aggregator.volumes( assets, end_dt) # append the partial day. daily_data[-1] = minute_value return daily_data def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of minute frequency for the given sids. 
""" # get all the minutes for this window mm = self.env.market_minutes end_loc = mm.get_loc(end_dt) start_loc = end_loc - bar_count + 1 if start_loc < 0: suggested_start_day = (mm[bar_count] + self.env.trading_day).date() raise HistoryWindowStartsBeforeData( first_trading_day=self.env.first_trading_day.date(), bar_count=bar_count, suggested_start_day=suggested_start_day, ) minutes_for_window = mm[start_loc:end_loc + 1] asset_minute_data = self._get_minute_window_for_assets( assets, field_to_use, minutes_for_window, ) return pd.DataFrame( asset_minute_data, index=minutes_for_window, columns=assets ) def get_history_window(self, assets, end_dt, bar_count, frequency, field, ffill=True): """ Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data. """ if field not in OHLCVP_FIELDS: raise ValueError("Invalid field: {0}".format(field)) if frequency == "1d": if field == "price": df = self._get_history_daily_window(assets, end_dt, bar_count, "close") else: df = self._get_history_daily_window(assets, end_dt, bar_count, field) elif frequency == "1m": if field == "price": df = self._get_history_minute_window(assets, end_dt, bar_count, "close") else: df = self._get_history_minute_window(assets, end_dt, bar_count, field) else: raise ValueError("Invalid frequency: {0}".format(frequency)) # forward-fill price if field == "price": if frequency == "1m": data_frequency = 'minute' elif frequency == "1d": data_frequency = 'daily' else: raise Exception( "Only 1d and 1m are supported for forward-filling.") dt_to_fill = df.index[0] perspective_dt = df.index[-1] assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0] for missing_loc in assets_with_leading_nan: asset = assets[missing_loc] previous_dt = self.get_last_traded_dt( asset, dt_to_fill, data_frequency) if pd.isnull(previous_dt): continue previous_value = self.get_adjusted_value( asset, field, previous_dt, perspective_dt, data_frequency, ) df.iloc[0, missing_loc] = previous_value df.fillna(method='ffill', inplace=True) for asset in df.columns: if df.index[-1] >= asset.end_date: # if the window extends past the asset's end date, set # all post-end-date values to NaN in that asset's series series = df[asset] series[series.index.normalize() > asset.end_date] = np.NaN return df def _get_minute_window_for_assets(self, assets, field, minutes_for_window): """ Internal method that gets a window of adjusted minute data for an asset and specified date range. Used to support the history API method for minute bars. Missing bars are filled with NaN. Parameters ---------- asset : Asset The asset whose data is desired. field: string The specific field to return. "open", "high", "close_price", etc. minutes_for_window: pd.DateTimeIndex The list of minutes representing the desired window. Each minute is a pd.Timestamp. Returns ------- A numpy array with requested values. """ if isinstance(assets, Future): return self._get_minute_window_for_future([assets], field, minutes_for_window) else: # TODO: Make caller accept assets. 
window = self._get_minute_window_for_equities(assets, field, minutes_for_window) return window def _get_minute_window_for_future(self, asset, field, minutes_for_window): # THIS IS TEMPORARY. For now, we are only exposing futures within # equity trading hours (9:30 am to 4pm, Eastern). The easiest way to # do this is to simply do a spot lookup for each desired minute. return_data = np.zeros(len(minutes_for_window), dtype=np.float64) for idx, minute in enumerate(minutes_for_window): return_data[idx] = \ self._get_minute_spot_value_future(asset, field, minute) # Note: an improvement could be to find the consecutive runs within # minutes_for_window, and use them to read the underlying ctable # more efficiently. # Once futures are on 24-hour clock, then we can just grab all the # requested minutes in one shot from the ctable. # no adjustments for futures, yay. return return_data def _get_minute_window_for_equities( self, assets, field, minutes_for_window): return self._equity_minute_history_loader.history(assets, minutes_for_window, field) def _apply_all_adjustments(self, data, asset, dts, field, price_adj_factor=1.0): """ Internal method that applies all the necessary adjustments on the given data array. The adjustments are: - splits - if field != "volume": - mergers - dividends - * 0.001 - any zero fields replaced with NaN - all values rounded to 3 digits after the decimal point. Parameters ---------- data : np.array The data to be adjusted. asset: Asset The asset whose data is being adjusted. dts: pd.DateTimeIndex The list of minutes or days representing the desired window. field: string The field whose values are in the data array. price_adj_factor: float Factor with which to adjust OHLC values. Returns ------- None. The data array is modified in place. """ self._apply_adjustments_to_window( self._get_adjustment_list( asset, self._splits_dict, "SPLITS" ), data, dts, field != 'volume' ) if field != 'volume': self._apply_adjustments_to_window( self._get_adjustment_list( asset, self._mergers_dict, "MERGERS" ), data, dts, True ) self._apply_adjustments_to_window( self._get_adjustment_list( asset, self._dividends_dict, "DIVIDENDS" ), data, dts, True ) if price_adj_factor is not None: data *= price_adj_factor np.around(data, 3, out=data) def _get_daily_window_for_sids( self, assets, field, days_in_window, extra_slot=True): """ Internal method that gets a window of adjusted daily data for a sid and specified date range. Used to support the history API method for daily bars. Parameters ---------- asset : Asset The asset whose data is desired. start_dt: pandas.Timestamp The start of the desired window of data. bar_count: int The number of days of data to return. field: string The specific field to return. "open", "high", "close_price", etc. extra_slot: boolean Whether to allocate an extra slot in the returned numpy array. This extra slot will hold the data for the last partial day. It's much better to create it here than to create a copy of the array later just to add a slot. Returns ------- A numpy array with requested values. Any missing slots filled with nan. 
""" bar_count = len(days_in_window) # create an np.array of size bar_count if extra_slot: return_array = np.zeros((bar_count + 1, len(assets))) else: return_array = np.zeros((bar_count, len(assets))) if field != "volume": # volumes default to 0, so we don't need to put NaNs in the array return_array[:] = np.NAN if bar_count != 0: data = self._equity_history_loader.history(assets, days_in_window, field) if extra_slot: return_array[:len(return_array) - 1, :] = data else: return_array[:len(data)] = data return return_array @staticmethod def _apply_adjustments_to_window(adjustments_list, window_data, dts_in_window, multiply): if len(adjustments_list) == 0: return # advance idx to the correct spot in the adjustments list, based on # when the window starts idx = 0 while idx < len(adjustments_list) and dts_in_window[0] >\ adjustments_list[idx][0]: idx += 1 # if we've advanced through all the adjustments, then there's nothing # to do. if idx == len(adjustments_list): return while idx < len(adjustments_list): adjustment_to_apply = adjustments_list[idx] if adjustment_to_apply[0] > dts_in_window[-1]: break range_end = dts_in_window.searchsorted(adjustment_to_apply[0]) if multiply: window_data[0:range_end] *= adjustment_to_apply[1] else: window_data[0:range_end] /= adjustment_to_apply[1] idx += 1 def _get_adjustment_list(self, asset, adjustments_dict, table_name): """ Internal method that returns a list of adjustments for the given sid. Parameters ---------- asset : Asset The asset for which to return adjustments. adjustments_dict: dict A dictionary of sid -> list that is used as a cache. table_name: string The table that contains this data in the adjustments db. Returns ------- adjustments: list A list of [multiplier, pd.Timestamp], earliest first """ if self._adjustment_reader is None: return [] sid = int(asset) try: adjustments = adjustments_dict[sid] except KeyError: adjustments = adjustments_dict[sid] = self._adjustment_reader.\ get_adjustments_for_sid(table_name, sid) return adjustments def _check_is_currently_alive(self, asset, dt): sid = int(asset) if sid not in self._asset_start_dates: self._get_asset_start_date(asset) start_date = self._asset_start_dates[sid] if self._asset_start_dates[sid] > dt: raise NoTradeDataAvailableTooEarly( sid=sid, dt=normalize_date(dt), start_dt=start_date ) end_date = self._asset_end_dates[sid] if self._asset_end_dates[sid] < dt: raise NoTradeDataAvailableTooLate( sid=sid, dt=normalize_date(dt), end_dt=end_date ) def _get_asset_start_date(self, asset): self._ensure_asset_dates(asset) return self._asset_start_dates[asset] def _get_asset_end_date(self, asset): self._ensure_asset_dates(asset) return self._asset_end_dates[asset] def _ensure_asset_dates(self, asset): sid = int(asset) if sid not in self._asset_start_dates: if self._first_trading_day is not None: self._asset_start_dates[sid] = \ max(asset.start_date, self._first_trading_day) else: self._asset_start_dates[sid] = asset.start_date self._asset_end_dates[sid] = asset.end_date def get_splits(self, sids, dt): """ Returns any splits for the given sids and the given dt. Parameters ---------- sids : container Sids for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(int, float)] List of splits, where each split is a (sid, ratio) tuple. 
""" if self._adjustment_reader is None or not sids: return {} # convert dt to # of seconds since epoch, because that's what we use # in the adjustments db seconds = int(dt.value / 1e9) splits = self._adjustment_reader.conn.execute( "SELECT sid, ratio FROM SPLITS WHERE effective_date = ?", (seconds,)).fetchall() splits = [split for split in splits if split[0] in sids] return splits def get_stock_dividends(self, sid, trading_days): """ Returns all the stock dividends for a specific sid that occur in the given trading range. Parameters ---------- sid: int The asset whose stock dividends should be returned. trading_days: pd.DatetimeIndex The trading range. Returns ------- list: A list of objects with all relevant attributes populated. All timestamp fields are converted to pd.Timestamps. """ if self._adjustment_reader is None: return [] if len(trading_days) == 0: return [] start_dt = trading_days[0].value / 1e9 end_dt = trading_days[-1].value / 1e9 dividends = self._adjustment_reader.conn.execute( "SELECT * FROM stock_dividend_payouts WHERE sid = ? AND " "ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\ fetchall() dividend_info = [] for dividend_tuple in dividends: dividend_info.append({ "declared_date": dividend_tuple[1], "ex_date": pd.Timestamp(dividend_tuple[2], unit="s"), "pay_date": pd.Timestamp(dividend_tuple[3], unit="s"), "payment_sid": dividend_tuple[4], "ratio": dividend_tuple[5], "record_date": pd.Timestamp(dividend_tuple[6], unit="s"), "sid": dividend_tuple[7] }) return dividend_info def contains(self, asset, field): return field in BASE_FIELDS or \ (field in self._augmented_sources_map and asset in self._augmented_sources_map[field]) def get_fetcher_assets(self, dt): """ Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects. """ # return a list of assets for the current date, as defined by the # fetcher source if self._extra_source_df is None: return [] day = normalize_date(dt) if day in self._extra_source_df.index: assets = self._extra_source_df.loc[day]['sid'] else: return [] if isinstance(assets, pd.Series): return [x for x in assets if isinstance(x, Asset)] else: return [assets] if isinstance(assets, Asset) else [] @weak_lru_cache(20) def _get_minute_count_for_transform(self, ending_minute, days_count): # cache size picked somewhat loosely. this code exists purely to # handle deprecated API. # bars is the number of days desired. we have to translate that # into the number of minutes we want. # we get all the minutes for the last (bars - 1) days, then add # all the minutes so far today. the +2 is to account for ignoring # today, and the previous day, in doing the math. previous_day = self.env.previous_trading_day(ending_minute) days = self.env.days_in_range( self.env.add_trading_days(-days_count + 2, previous_day), previous_day, ) minutes_count = \ sum(210 if day in self.env.early_closes else 390 for day in days) # add the minutes for today today_open = self.env.get_open_and_close(ending_minute)[0] minutes_count += \ ((ending_minute - today_open).total_seconds() // 60) + 1 return minutes_count def get_simple_transform(self, asset, transform_name, dt, data_frequency, bars=None): if transform_name == "returns": # returns is always calculated over the last 2 days, regardless # of the simulation's data frequency. 
hst = self.get_history_window( [asset], dt, 2, "1d", "price", ffill=True )[asset] return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0] if bars is None: raise ValueError("bars cannot be None!") if data_frequency == "minute": freq_str = "1m" calculated_bar_count = self._get_minute_count_for_transform( dt, bars ) else: freq_str = "1d" calculated_bar_count = bars price_arr = self.get_history_window( [asset], dt, calculated_bar_count, freq_str, "price", ffill=True )[asset] if transform_name == "mavg": return nanmean(price_arr) elif transform_name == "stddev": return nanstd(price_arr, ddof=1) elif transform_name == "vwap": volume_arr = self.get_history_window( [asset], dt, calculated_bar_count, freq_str, "volume", ffill=True )[asset] vol_sum = nansum(volume_arr) try: ret = nansum(price_arr * volume_arr) / vol_sum except ZeroDivisionError: ret = np.nan return ret
apache-2.0
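The DataPortal methods reproduced above document a history API (get_history_window) but never show a call site. The following is a minimal sketch, not part of the original file, assuming an already-constructed data_portal and an asset resolved elsewhere; both are hypothetical placeholders supplied by the caller.

def trailing_daily_prices(data_portal, asset, end_dt, bar_count=5):
    """Return the trailing `bar_count` adjusted daily closes for one asset.

    `data_portal` is assumed to be an already-built DataPortal and `asset`
    an Asset known to its asset finder; neither is constructed here.
    """
    window = data_portal.get_history_window(
        [asset],      # assets: list of Asset objects
        end_dt,       # end_dt: pd.Timestamp of the current simulation dt
        bar_count,    # bar_count: number of bars desired
        "1d",         # frequency: daily bars
        "price",      # field: adjusted close, per the docstring above
        ffill=True,   # forward-fill missing prices
    )
    return window[asset]

With field="price" and ffill=True this exercises the forward-filling branch above, which seeds a leading NaN from the asset's last traded value before filling the rest of the window.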
nmartensen/pandas
pandas/tests/indexing/test_callable.py
14
8721
# -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 import numpy as np import pandas as pd import pandas.util.testing as tm class TestIndexingCallable(object): def test_frame_loc_ix_callable(self): # GH 11485 df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'), 'C': [1, 2, 3, 4]}) # iloc cannot use boolean Series (see GH3635) # return bool indexer res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) res = df.loc[lambda x: x.A > 2, ] tm.assert_frame_equal(res, df.loc[df.A > 2, ]) res = df.loc[lambda x: x.A > 2, ] tm.assert_frame_equal(res, df.loc[df.A > 2, ]) res = df.loc[lambda x: x.B == 'b', :] tm.assert_frame_equal(res, df.loc[df.B == 'b', :]) res = df.loc[lambda x: x.B == 'b', :] tm.assert_frame_equal(res, df.loc[df.B == 'b', :]) res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B'] tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]]) res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B'] tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]]) res = df.loc[lambda x: x.A > 2, lambda x: 'B'] tm.assert_series_equal(res, df.loc[df.A > 2, 'B']) res = df.loc[lambda x: x.A > 2, lambda x: 'B'] tm.assert_series_equal(res, df.loc[df.A > 2, 'B']) res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']]) res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']]) # scalar res = df.loc[lambda x: 1, lambda x: 'A'] assert res == df.loc[1, 'A'] res = df.loc[lambda x: 1, lambda x: 'A'] assert res == df.loc[1, 'A'] def test_frame_loc_ix_callable_mixture(self): # GH 11485 df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'), 'C': [1, 2, 3, 4]}) res = df.loc[lambda x: x.A > 2, ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A > 2, ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[[2, 3], lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']]) res = df.loc[[2, 3], lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']]) res = df.loc[3, lambda x: ['A', 'B']] tm.assert_series_equal(res, df.loc[3, ['A', 'B']]) res = df.loc[3, lambda x: ['A', 'B']] tm.assert_series_equal(res, df.loc[3, ['A', 'B']]) def test_frame_loc_callable(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return label res = df.loc[lambda x: ['A', 'C']] tm.assert_frame_equal(res, df.loc[['A', 'C']]) res = df.loc[lambda x: ['A', 'C'], ] tm.assert_frame_equal(res, df.loc[['A', 'C'], ]) res = df.loc[lambda x: ['A', 'C'], :] tm.assert_frame_equal(res, df.loc[['A', 'C'], :]) res = df.loc[lambda x: ['A', 'C'], lambda x: 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[lambda x: ['A', 'C'], lambda x: ['X']] tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) # mixture res = df.loc[['A', 'C'], lambda x: 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[['A', 'C'], lambda x: ['X']] tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) res = df.loc[lambda x: ['A', 'C'], 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[lambda x: ['A', 'C'], ['X']] 
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) def test_frame_loc_callable_setitem(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return label res = df.copy() res.loc[lambda x: ['A', 'C']] = -20 exp = df.copy() exp.loc[['A', 'C']] = -20 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], :] = 20 exp = df.copy() exp.loc[['A', 'C'], :] = 20 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], lambda x: 'X'] = -1 exp = df.copy() exp.loc[['A', 'C'], 'X'] = -1 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], lambda x: ['X']] = [5, 10] exp = df.copy() exp.loc[['A', 'C'], ['X']] = [5, 10] tm.assert_frame_equal(res, exp) # mixture res = df.copy() res.loc[['A', 'C'], lambda x: 'X'] = np.array([-1, -2]) exp = df.copy() exp.loc[['A', 'C'], 'X'] = np.array([-1, -2]) tm.assert_frame_equal(res, exp) res = df.copy() res.loc[['A', 'C'], lambda x: ['X']] = 10 exp = df.copy() exp.loc[['A', 'C'], ['X']] = 10 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], 'X'] = -2 exp = df.copy() exp.loc[['A', 'C'], 'X'] = -2 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], ['X']] = -4 exp = df.copy() exp.loc[['A', 'C'], ['X']] = -4 tm.assert_frame_equal(res, exp) def test_frame_iloc_callable(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return location res = df.iloc[lambda x: [1, 3]] tm.assert_frame_equal(res, df.iloc[[1, 3]]) res = df.iloc[lambda x: [1, 3], :] tm.assert_frame_equal(res, df.iloc[[1, 3], :]) res = df.iloc[lambda x: [1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) # mixture res = df.iloc[[1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[[1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) res = df.iloc[lambda x: [1, 3], 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) def test_frame_iloc_callable_setitem(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return location res = df.copy() res.iloc[lambda x: [1, 3]] = 0 exp = df.copy() exp.iloc[[1, 3]] = 0 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], :] = -1 exp = df.copy() exp.iloc[[1, 3], :] = -1 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: 0] = 5 exp = df.copy() exp.iloc[[1, 3], 0] = 5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: [0]] = 25 exp = df.copy() exp.iloc[[1, 3], [0]] = 25 tm.assert_frame_equal(res, exp) # mixture res = df.copy() res.iloc[[1, 3], lambda x: 0] = -3 exp = df.copy() exp.iloc[[1, 3], 0] = -3 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[[1, 3], lambda x: [0]] = -5 exp = df.copy() exp.iloc[[1, 3], [0]] = -5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], 0] = 10 exp = df.copy() exp.iloc[[1, 3], 0] = 10 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], [0]] = [-5, -5] exp = df.copy() exp.iloc[[1, 3], [0]] = [-5, -5] tm.assert_frame_equal(res, exp)
bsd-3-clause
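The pandas test module above exercises callable indexers for .loc and .iloc (GH 11485). A compact, self-contained illustration of the same pattern outside the test harness:

import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3, 4],
                   'B': list('aabb'),
                   'C': [1, 2, 3, 4]})

# A callable passed to .loc receives the DataFrame and must return a valid
# indexer; here it yields a boolean mask, equivalent to df.loc[df.A > 2].
subset = df.loc[lambda x: x.A > 2, ['A', 'B']]

# .iloc accepts callables too, as long as they return positional indexers.
first_col = df.iloc[lambda x: [1, 3], 0]

print(subset)
print(first_col)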
cwu2011/scikit-learn
sklearn/preprocessing/__init__.py
14
1184
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'label_binarize', ]
bsd-3-clause
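Since the __init__ above only re-exports names, a short example showing two of the listed transformers in use may help; it relies only on the public scikit-learn API imported by that module.

import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler

X = np.array([[1.0, -2.0],
              [2.0,  0.0],
              [3.0,  2.0]])

# StandardScaler centers each column to zero mean and unit variance.
X_std = StandardScaler().fit_transform(X)

# MinMaxScaler rescales each column to the [0, 1] range instead.
X_minmax = MinMaxScaler().fit_transform(X)

print(X_std.mean(axis=0))                          # approximately [0, 0]
print(X_minmax.min(axis=0), X_minmax.max(axis=0))  # [0, 0] and [1, 1]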
qifeigit/scikit-learn
sklearn/neighbors/tests/test_dist_metrics.py
230
5234
import itertools import pickle import numpy as np from numpy.testing import assert_array_almost_equal import scipy from scipy.spatial.distance import cdist from sklearn.neighbors.dist_metrics import DistanceMetric from nose import SkipTest def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def cmp_version(version1, version2): version1 = tuple(map(int, version1.split('.')[:2])) version2 = tuple(map(int, version2.split('.')[:2])) if version1 < version2: return -1 elif version1 > version2: return 1 else: return 0 class TestMetrics: def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5, rseed=0, dtype=np.float64): np.random.seed(rseed) self.X1 = np.random.random((n1, d)).astype(dtype) self.X2 = np.random.random((n2, d)).astype(dtype) # make boolean arrays: ones and zeros self.X1_bool = self.X1.round(0) self.X2_bool = self.X2.round(0) V = np.random.random((d, d)) VI = np.dot(V, V.T) self.metrics = {'euclidean': {}, 'cityblock': {}, 'minkowski': dict(p=(1, 1.5, 2, 3)), 'chebyshev': {}, 'seuclidean': dict(V=(np.random.random(d),)), 'wminkowski': dict(p=(1, 1.5, 3), w=(np.random.random(d),)), 'mahalanobis': dict(VI=(VI,)), 'hamming': {}, 'canberra': {}, 'braycurtis': {}} self.bool_metrics = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def test_cdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X2, metric, **kwargs) yield self.check_cdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X2_bool, metric) yield self.check_cdist_bool, metric, D_true def check_cdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1, self.X2) assert_array_almost_equal(D12, D_true) def check_cdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool, self.X2_bool) assert_array_almost_equal(D12, D_true) def test_pdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X1, metric, **kwargs) yield self.check_pdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X1_bool, metric) yield self.check_pdist_bool, metric, D_true def check_pdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1) assert_array_almost_equal(D12, D_true) def check_pdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool) assert_array_almost_equal(D12, D_true) def test_haversine_metric(): def haversine_slow(x1, x2): return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2 + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2)) X = np.random.random((10, 2)) haversine = DistanceMetric.get_metric("haversine") D1 = haversine.pairwise(X) D2 = np.zeros_like(D1) for i, x1 in enumerate(X): for j, x2 in enumerate(X): D2[i, j] = haversine_slow(x1, x2) assert_array_almost_equal(D1, D2) assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * 
D2) ** 2) def test_pyfunc_metric(): X = np.random.random((10, 3)) euclidean = DistanceMetric.get_metric("euclidean") pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2) # Check if both callable metric and predefined metric initialized # DistanceMetric object is picklable euclidean_pkl = pickle.loads(pickle.dumps(euclidean)) pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc)) D1 = euclidean.pairwise(X) D2 = pyfunc.pairwise(X) D1_pkl = euclidean_pkl.pairwise(X) D2_pkl = pyfunc_pkl.pairwise(X) assert_array_almost_equal(D1, D2) assert_array_almost_equal(D1_pkl, D2_pkl)
bsd-3-clause
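A brief usage sketch of the DistanceMetric factory that the tests above exercise; the import path follows the sklearn.neighbors location used in this version of scikit-learn (newer releases expose the class elsewhere).

import numpy as np
from sklearn.neighbors import DistanceMetric  # location in this era of scikit-learn

X = np.random.RandomState(0).random_sample((5, 3))

# Pairwise Euclidean distances via the same factory the tests use;
# "minkowski" with p=2 would produce an identical matrix.
euclidean = DistanceMetric.get_metric("euclidean")
D = euclidean.pairwise(X)

print(D.shape)                                              # (5, 5)
print(np.allclose(D, D.T), np.allclose(np.diag(D), 0.0))    # symmetric, zero diagonal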