repo_name | path | copies | size | content | license
---|---|---|---|---|---
dpressel/baseline | scripts/lr_visualize.py | 1 | 8549 | import os
import math
import json
import inspect
import argparse
from hashlib import sha1
from collections import defaultdict
import numpy as np
import baseline as bl
import matplotlib.pyplot as plt
from lr_compare import plot_learning_rates
# Collect possible Schedulers
OPTIONS = {}
WARMUP = {}
base_classes = (
bl.train.LearningRateScheduler.__name__,
bl.train.WarmupLearningRateScheduler.__name__
)
# Collect all schedulers except base classes and separate out the warmup ones
for item_name in dir(bl.train):
if item_name in base_classes: continue
item = getattr(bl.train, item_name)
try:
if issubclass(item, bl.train.LearningRateScheduler):
OPTIONS[item_name] = item
if issubclass(item, bl.train.WarmupLearningRateScheduler):
WARMUP[item_name] = item
except:
pass
REST = {}
for k, v in OPTIONS.items():
if k not in WARMUP:
REST[k] = v
# Collect the args and defaults that the schedulers use.
# This currently misses the `lr` because it is kwargs only as well
# As `values` because that is never a defaulted argument
# (we skip over ones without defaults)
ARGS = defaultdict(list)
DEFAULTS = defaultdict(list)
for k, v in OPTIONS.items():
az = inspect.getargspec(v).args
ds = inspect.getargspec(v).defaults
if az is None or ds is None: continue
for a, d in zip(reversed(az), reversed(ds)):
ARGS[a].append(k)
DEFAULTS[a].append(d)
for a, ds in DEFAULTS.items():
ds_set = set(ds)
if len(ds_set) == 1:
# If the only default value is None, set it to None.
if None in ds_set:
DEFAULTS[a] = None
# Set to the only default.
else:
DEFAULTS[a] = ds_set.pop()
else:
# Remove None and grab the last default; the choice is arbitrary but
# consistent
if None in ds: ds.remove(None)
DEFAULTS[a] = ds[-1]
new_defaults = defaultdict(lambda: None)
for k, v in DEFAULTS.items():
new_defaults[k] = v
DEFAULTS = new_defaults
def exp_steps(y, lr, decay_rate, decay_steps):
"""Calculate how many steps are needed to get to some value."""
return decay_steps * (math.log(y / lr) / math.log(decay_rate))
def inv_steps(y, lr, decay_rate, decay_steps):
"""Calculate how many steps are needed to get to some value."""
return decay_steps * (((lr / y) - 1.0) / decay_rate)
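# Derivation of the two helpers above (assuming the standard decay forms):
# exponential decay lr * decay_rate**(steps / decay_steps) = y solves to
#   steps = decay_steps * log(y / lr) / log(decay_rate)
# and inverse-time decay lr / (1 + decay_rate * steps / decay_steps) = y
# solves to
#   steps = decay_steps * ((lr / y) - 1) / decay_rate
# which are exactly the expressions returned by exp_steps and inv_steps.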
def get_steps(type_, args, double_warm=False):
"""
Calculate the number of steps to plot. Most of these calculations are
bespoke to the scheduler.
"""
if type_ == 'ConstantScheduler':
return 1000
# WarmupLinear
if type_ in WARMUP:
# If it is just a warmup scheduler, double the steps to show what happens after warmup
return args['warmup_steps'] * 2 if double_warm else args['warmup_steps']
# PiecewiseDecay, Zaremba
if args['boundaries'] is not None:
# If we are boundary based, show the bounds plus some extra
gap = np.max(np.diff(args['boundaries']))
return args['boundaries'][-1] + gap
if type_ == 'CyclicLRScheduler':
# Cyclic, show some cycles
return args['decay_steps'] * args['cycles']
# Show it decay once
# Cosine, InverseTimeDecay, Exponential
if type_ == 'ExponentialDecayScheduler':
return exp_steps(args['min_lr'], args['lr'], args['decay_rate'], args['decay_steps'])
if type_ == 'InverseTimeDecayScheduler':
return inv_steps(args['min_lr'], args['lr'], args['decay_rate'], args['decay_steps'])
if type_ == 'CosineDecayScheduler':
return args['decay_steps']
# def plot_learning_rates(ys, names):
# fig, ax = plt.subplots(1, 1)
# ax.set_title('Learning Rates from baseline.')
# ax.set_xlabel('Steps')
# ax.set_ylabel('Learning Rates')
# for y, name in zip(ys, names):
# ax.plot(np.arange(len(y)), y, label=name)
# ax.legend()
# return fig
def main():
parser = argparse.ArgumentParser(description="Plot a learning rate schedule.")
# Begin lr scheduler based arguments
parser.add_argument(
"type",
choices=OPTIONS.keys(),
help="The scheduler to visualize."
)
parser.add_argument(
"--lr",
type=float, default=1.0,
help="The initial Learning Rate."
)
parser.add_argument(
"--warmup_steps",
type=int, default=DEFAULTS['warmup_steps'],
help="The number of steps for a warmup. Used in {}.".format(", ".join(ARGS['warmup_steps']))
)
parser.add_argument(
"--max_lr",
type=float, default=DEFAULTS['max_lr'],
help="The maximum learning rate for a cyclical one. Used in {}.".format(", ".join(ARGS['max_lr']))
)
parser.add_argument(
"--decay_steps",
type=int, default=DEFAULTS['decay_steps'],
help="The number of steps to take before a decay. Used in {}.".format(", ".join(ARGS['decay_steps']))
)
parser.add_argument("--boundaries", nargs="+", default=DEFAULTS['boundaries'])
parser.add_argument("--values", nargs="+", default=DEFAULTS['values'])
parser.add_argument(
"--decay_rate",
type=float, default=DEFAULTS['decay_rate'],
help="How much to decay. Used in {}.".format(", ".join(ARGS['decay_rate']))
)
parser.add_argument(
"--staircase",
action="store_true", default=DEFAULTS['staircase'],
help="Should you decay in a stepwise fashion? Used in {}.".format(", ".join(ARGS['staircase']))
)
parser.add_argument(
"--alpha",
type=float, default=DEFAULTS['alpha'],
help="Alpha. Used in {}.".format(", ".join(ARGS['alpha']))
)
parser.add_argument(
"--warm",
choices=WARMUP.keys(),
default=DEFAULTS['warm'],
help="The Warmup Scheduler to use. Used in {}.".format(", ".join(ARGS['warm']))
)
parser.add_argument(
"--rest",
choices=REST.keys(),
default=DEFAULTS['rest'],
help="The Scheduler to use after warmup. Used in {}.".format(", ".join(ARGS['rest']))
)
# Begin Visualization only arguments
parser.add_argument(
"--steps", type=int,
help="Override the number of steps to plot."
)
parser.add_argument(
"--cycles",
type=int, default=6,
help="Override the number of cycles to plot."
)
parser.add_argument(
"--min_lr",
type=float, default=1e-3,
help="When calculating the number of steps to show how small should the learning rate get before we stop."
)
parser.add_argument(
"--out_file", default='.lrs/cache.index', help="Where to save the results for later plotting."
)
args = parser.parse_args()
args = vars(args)
# Build the sub schedulers for the Composite Scheduler
if args['type'] == 'CompositeLRScheduler':
if args['warm'] is None or args['rest'] is None:
raise RuntimeError("Warmup and Rest Scheduler are required when the Scheduler is CompositeLRScheduler")
args['warm'] = WARMUP[args['warm']](**args)
args['rest'] = REST[args['rest']](**args)
if args['boundaries'] is not None:
if args['values'] is not None:
if len(args['boundaries']) != len(args['values']):
raise RuntimeError("Bounds and Value list must be aligned")
lr = OPTIONS[args['type']](**args)
# Calculate the number of steps you should show.
if args['steps'] is None:
if args['type'] == 'CompositeLRScheduler':
warm = get_steps(type(args['warm']).__name__, args, double_warm=False)
rest = get_steps(type(args['rest']).__name__, args)
args['steps'] = warm + rest
else:
args['steps'] = get_steps(args['type'], args, double_warm=True)
# Plot the schedule
steps = np.arange(0, args['steps'])
ys = np.stack([lr(s) for s in steps])
fig = plot_learning_rates([ys], [str(lr)])
plt.show()
# Save the lr values to a cache file, .lrs/{hash of lr values}.npy, and record
# information about it in the cache index
if args['out_file'] is not None:
dir_ = os.path.dirname(args['out_file'])
try: os.makedirs(dir_)
except: pass
with open(args['out_file'], 'a') as f:
lr_hash = sha1(ys.tostring()).hexdigest()
file_name = "{}.npy".format(lr_hash)
index = {'name': str(lr), 'file': file_name}
f.write(json.dumps(index) + "\n")
np.save(os.path.join(dir_, file_name), ys)
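# Each appended index line is a JSON object of the form
#   {"name": "<str(lr) of the scheduler>", "file": "<sha1 of the values>.npy"}
# and the matching .npy file next to the index holds the plotted values.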
if __name__ == "__main__":
main()
| apache-2.0 |
kennethdecker/MagnePlane | paper/images/trade_scripts/pressure_trades_plot.py | 4 | 2273 | import numpy as np
import matplotlib.pyplot as plt
p_tunnel = np.loadtxt('../data_files/pressure_trades/p_tunnel.txt', delimiter = '\t')
Re = np.loadtxt('../data_files/pressure_trades/Re.txt', delimiter = '\t')
A_tube = np.loadtxt('../data_files/pressure_trades/A_tube.txt', delimiter = '\t')
T_tunnel = np.loadtxt('../data_files/pressure_trades/T_tunnel.txt', delimiter = '\t')
L_pod = np.loadtxt('../data_files/pressure_trades/L_pod.txt', delimiter = '\t')
power = np.loadtxt('../data_files/pressure_trades/comp_power.txt', delimiter = '\t')
steady_vac = np.loadtxt('../data_files/pressure_trades/vac_power.txt', delimiter = '\t')
total_energy = np.loadtxt('../data_files/pressure_trades/total_energy.txt', delimiter = '\t')
fig = plt.figure(figsize = (3.25,3.5), tight_layout = True)
ax = plt.axes()
plt.setp(ax.get_xticklabels(), fontsize=8)
plt.setp(ax.get_yticklabels(), fontsize=8)
plt.plot(p_tunnel, A_tube, 'b-', linewidth = 2.0)
plt.xlabel('Tube Pressure (Pa)', fontsize = 10, fontweight = 'bold')
plt.ylabel('Tube Area ($m^2$)', fontsize = 10, fontweight = 'bold')
ax.set_xticks([1000,2000,3000])
plt.savefig('../graphs/pressure_trades/pressure_vs_Area.png', format = 'png', dpi = 300)
plt.show()
fig = plt.figure(figsize = (3.25,3.5), tight_layout = True)
ax1 = fig.add_subplot(211)
line1, = ax1.plot(p_tunnel, steady_vac, 'g-', linewidth = 2.0, label = 'Vacuum Power')
line2, = ax1.plot(p_tunnel, power, 'b-', linewidth = 2.0, label = 'Compressor Power')
ax1.set_ylabel('Power (hp)', fontsize = 10, fontweight = 'bold')
plt.legend(handles = [line1, line2], loc = 1, fontsize = 8)
ax1.set_xticks([1000,2000,3000])
ax1.set_yticks([4000, 8000, 12000, 16000])
ax2 = fig.add_subplot(212)
ax2.plot(p_tunnel, total_energy/(1.0e6), 'r-', linewidth = 2.0)
ax2.set_xlabel('Tube Pressure (Pa)', fontsize = 10, fontweight = 'bold')
ax2.set_ylabel('Total Energy Cost \n per Year (Million USD)', fontsize = 10, fontweight = 'bold')
ax2.set_xticks([1000,2000,3000])
ax2.set_yticks([0,40,80,120])
plt.setp(ax1.get_xticklabels(), fontsize=8)
plt.setp(ax1.get_yticklabels(), fontsize=8)
plt.setp(ax2.get_xticklabels(), fontsize=8)
plt.setp(ax2.get_yticklabels(), fontsize=8)
plt.savefig('../graphs/pressure_trades/pressure_vs_power.png', format = 'png', dpi = 300)
plt.show() | apache-2.0 |
Darthone/Informed-Finance-Canary | tinkering/ml/sklearn_svr.py | 2 | 3910 | #!/usr/bin/env python -W ignore::DeprecationWarning
import numpy as np
import pandas as pd
from sklearn import preprocessing, cross_validation, neighbors, svm, metrics, grid_search
import peewee
from peewee import *
import ifc.ta as ta
import math
def addDailyReturn(dataset):
"""
Add a daily return column to create class labels (buy/hold/sell in relation to the previous day)
"""
#will normalize labels
le = preprocessing.LabelEncoder()
#print "dataset['Adj_Close']\n", dataset['Adj_Close'][:5]
#print "dataset['Adj_Close'].shift(-1)\n", dataset['Adj_Close'].shift(1)[:5]
dataset['UpDown'] = (dataset['Adj_Close']-dataset['Adj_Close'].shift(1))/dataset['Adj_Close'].shift(1)
#print dataset['UpDown'][240:]
# the labels will be encoded as integers when transformed
dataset.UpDown[dataset.UpDown > 0] = "sell"
dataset.UpDown[dataset.UpDown == 0] = "hold"
dataset.UpDown[dataset.UpDown < 0] = "buy"
#print dataset['UpDown'][:10]
dataset.UpDown = le.fit(dataset.UpDown).transform(dataset.UpDown)
#print dataset['UpDown']
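# Note: LabelEncoder assigns integer codes to the sorted label names, so
# after the transform above "buy" -> 0, "hold" -> 1 and "sell" -> 2.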
accuracies = []
def preProcessing(stock_name, start_date, end_date):
"""
Clean up data to allow for classifiers to predict
"""
x = ta.get_series(stock_name, start_date, end_date)
x.run_calculations()
x.trim_fat()
df = x.df
#df = pd.read_csv(csv)
addDailyReturn(df)
#The columns left will be the ones that are being used to predict
df.drop(['Date'], 1, inplace=True)
df.drop(['Low'], 1, inplace=True)
df.drop(['Volume'], 1, inplace=True)
#df.drop(['Open'], 1, inplace=True)
#df.drop(['Adj_Close'],1, inplace=True)
df.drop(['Close'],1, inplace=True)
df.drop(['High'],1, inplace=True)
df.drop(['mavg_10'],1, inplace=True)
df.drop(['mavg_30'],1, inplace=True)
df.drop(['rsi_14'],1, inplace=True)
return df
def regressorOp(x, y):
"""
This will optimize the parameters for the algo
"""
regr_rbf = svm.SVR(kernel="rbf")
C = [1000, 10, 1]
gamma = [0.005, 0.004, 0.003, 0.002, 0.001]
epsilon = [0.1, 0.01]
parameters = {"C":C, "gamma":gamma, "epsilon":epsilon}
gs = grid_search.GridSearchCV(regr_rbf, parameters, scoring="r2")
gs.fit(x, y)
print "Best Estimator:\n", gs.best_estimator_
print "Type: ", type(gs.best_estimator_)
return gs.best_estimator_
for i in range(1):
#calling in date ranges plus stock name to be pulled
ticker = raw_input('Enter a stock ticker then press "Enter":\n')
train_df = preProcessing(ticker, "2015-04-17", "2016-04-17")
test_df = preProcessing(ticker, "2016-04-17", "2017-04-17")
print "-----------------------------------------"
print "test_df[:5]:"
print test_df[:5]
# separate the label column from the features into different arrays so the algo knows what to predict on
X_train = np.array(train_df.drop(['UpDown'],1))
y_train = np.array(train_df['UpDown'])
X_test = np.array(test_df.drop(['UpDown'],1))
y_test = np.array(test_df['UpDown'])
#print test_df[:240]
#X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.5)
# regression optimization
clf = regressorOp(X_train, y_train)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
accuracy = clf.score(X_test,y_test)
variance = metrics.explained_variance_score(y_test, y_pred)
# iterate and print average accuracy rate
print "---------------------------------------"
print "accuracy: " + str(accuracy)
print "\nvariance: " + str(variance)
accuracies.append(accuracy)
# test value
test_set = np.array([[100,100],[0,0],[45, 42],[6,6]])
print "--------------------------------------"
print "np.array([100,100],[0,0],[45, 42],[6,6]])"
prediction = clf.predict(test_set)
print "--------------------------------------"
print "prediction: "
print prediction
#print sum(accuracies)/len(accuracies)
| mit |
mblondel/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
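# The loop above evaluates the decision function one grid point at a time.
# Note that newer scikit-learn releases expect a 2-D array here; an
# equivalent vectorized form of the same grid evaluation would be:
#   Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)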
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
feranick/Pi-bot | Old/3_ML-splrcbxyz/piRC_ML.py | 1 | 10653 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* PiRC - Machine learning train and predict
* version: 20170518b
*
* By: Nicola Ferralis <feranick@hotmail.com>
*
***********************************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, os, getopt, glob, csv
from time import sleep, time
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
#**********************************************
''' MultiClassReductor '''
#**********************************************
class MultiClassReductor():
def __self__(self):
self.name = name
totalClass = [[-1,-1],[-1,0],[-1,1],[0,-1],[0,0],[0,1],[1,-1],[1,0],[1,1]]
def transform(self,y):
Cl = np.zeros(y.shape[0])
for j in range(len(y)):
Cl[j] = self.totalClass.index(np.array(y[j]).tolist())
return Cl
def inverse_transform(self,a):
return self.totalClass[int(a)]
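# Illustration of the mapping defined by totalClass above: the pair
# [steer, power] = [-1, -1] is transformed to class 0, [0, 0] to class 4
# and [1, 1] to class 8; inverse_transform(4) gives back [0, 0].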
#**********************************************
''' General parameters'''
#**********************************************
class params:
timeDelay = 0.25
filename = 'Training_splrcbxyz.txt'
runFullAuto = False
debug = False # do not activate sensors or motors in debug mode
#**********************************************
''' Neural Networks'''
#**********************************************
class nnDef:
runNN = True
nnAlwaysRetrain = False
syncTimeLimit = 20 # time in seconds for NN model synchronization
syncTrainModel = False
saveNewTrainingData = False
useRegressor = False
scaler = StandardScaler()
mlp = MultiClassReductor()
''' Solver for NN
lbfgs preferred for small datasets
(alternatives: 'adam' or 'sgd') '''
nnSolver = 'lbfgs'
nnNeurons = 10 #default = 10
#**********************************************
''' Main '''
#**********************************************
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "rtch:", ["run", "train", "collect", "help"])
except:
usage()
sys.exit(2)
if opts == []:
usage()
sys.exit(2)
try:
sys.argv[3]
if sys.argv[3] in ("-C", "--Classifier"):
nnDef.useRegressor = False
elif sys.argv[3] in ("-R", "--Regressor"):
nnDef.useRegressor = True
except:
nnDef.useRegressor = False
for o, a in opts:
if o in ("-r" , "--run"):
try:
runAuto(sys.argv[2],params.runFullAuto)
except:
exitProg()
if o in ("-t" , "--train"):
try:
runTrain(sys.argv[2])
except:
sys.exit(2)
if o in ("-c" , "--collect"):
try:
writeTrainFile()
except:
exitProg()
#*************************************************
''' runAuto '''
''' Use ML models to predict steer and power '''
#*************************************************
def runAuto(trainFile, type):
trainFileRoot = os.path.splitext(trainFile)[0]
Cl, sensors = readTrainFile(trainFile)
clf = runNN(sensors, Cl, trainFileRoot)
fullStop(False)
syncTime = time()
while True:
if time() - syncTime > nnDef.syncTimeLimit and nnDef.syncTrainModel == True:
print(" Reloading NN model...")
clf = runNN(sensors, Cl, trainFileRoot)
print(" Synchronizing NN model...\n")
os.system("./syncTFile.sh " + trainFileRoot + " &")
syncTime = time()
if type == False:
print(" Running \033[1mPartial Auto\033[0m Mode\n")
s, p = predictDrive(clf)
drive(s,p)
sleep(params.timeDelay)
else:
print(" Running \033[1mFull Auto\033[0m Mode\n")
dt=0
t1=time()
while dt < 0.5:
s, p = predictDrive(clf)
if p != 0:
dt = 0
drive(s,p)
else:
dt = time() - t1
sleep(params.timeDelay)
drive(0, 1)
sleep(0.5)
drive(0, 0)
#*************************************************
''' runTrain '''
''' Use ML models to predict steer and power '''
#*************************************************
def runTrain(trainFile):
trainFileRoot = os.path.splitext(trainFile)[0]
Cl, sensors = readTrainFile(trainFile)
nnDef.nnAlwaysRetrain = True
runNN(sensors, Cl, trainFileRoot)
#*************************************************
''' write training file from sensors '''
#*************************************************
def writeTrainFile():
while True:
import piRC_lib
s,p,l,r,c,b,x,y,z = piRC_lib.readAllSensors()
print(' S={0:.0f}, P={1:.0f}, L={2:.0f}, R={3:.0f}, C={4:.0f}, B={5:.0f}, X={6:.3f}, Y={7:.3f}, Z={8:.3f}'.format(s,p,l,r,c,b,x,y,z))
with open(params.filename, "a") as sum_file:
sum_file.write('{0:.0f}\t{1:.0f}\t{2:.0f}\t{3:.0f}\t{4:.0f}\t{5:.0f}\t{6:.3f}\t{7:.3f}\t{8:.3f}\n'.format(s,p,l,r,c,b,x,y,z))
#*************************************************
''' read Train File '''
#*************************************************
def readTrainFile(trainFile):
try:
with open(trainFile, 'r') as f:
M = np.loadtxt(f, unpack =False)
except:
print('\033[1m' + ' Training file not found \n' + '\033[0m')
return
steer = M[:,0]
power = M[:,1]
Cl = M[:,[0,1]]
sensors = np.delete(M,np.s_[0:2],1)
return Cl, sensors
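# Note on the expected file layout (derived from writeTrainFile above):
# each row holds the nine values s p l r c b x y z, so the first two
# columns (steer and power) form the labels Cl and the remaining seven
# sensor readings form the feature matrix.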
#********************************************************************************
''' Run Neural Network '''
#********************************************************************************
def runNN(sensors, Cl, Root):
if nnDef.useRegressor is False:
nnTrainedData = Root + '.nnModelC.pkl'
else:
nnTrainedData = Root + '.nnModelR.pkl'
print(' Running Neural Network: multi-layer perceptron (MLP) - (solver: ' + nnDef.nnSolver + ')...')
sensors = nnDef.scaler.fit_transform(sensors)
if nnDef.useRegressor is False:
Y = nnDef.mlp.transform(Cl)
else:
Y = Cl
try:
if nnDef.nnAlwaysRetrain == False:
with open(nnTrainedData):
print(' Opening NN training model...\n')
clf = joblib.load(nnTrainedData)
else:
raise ValueError('Force NN retraining.')
except:
#**********************************************
''' Retrain data if not available'''
#**********************************************
print(' Retraining NN model...\n')
if nnDef.useRegressor is False:
clf = MLPClassifier(solver=nnDef.nnSolver, alpha=1e-5, hidden_layer_sizes=(nnDef.nnNeurons,), random_state=1)
else:
clf = MLPRegressor(solver=nnDef.nnSolver, alpha=1e-5, hidden_layer_sizes=(nnDef.nnNeurons,), random_state=9)
clf.fit(sensors, Y)
joblib.dump(clf, nnTrainedData)
return clf
#*************************************************
''' Predict drive pattern '''
#*************************************************
def predictDrive(clf):
np.set_printoptions(suppress=True)
sp = [0,0]
if params.debug is True:
s,p,l,r,c,b,x,y,z = [-1,-1,116,117,111,158,0.224,0.108,1.004]
else:
import piRC_lib
s,p,l,r,c,b,x,y,z = piRC_lib.readAllSensors()
print(' S={0:.0f}, P={1:.0f}, L={2:.0f}, R={3:.0f}, C={4:.0f}, B={5:.0f}, X={6:.3f}, Y={7:.3f}, Z={8:.3f}'.format(s,p,l,r,c,b,x,y,z))
nowsensors = np.array([[round(l,0),round(r,0),round(c,0),round(b,0),round(x,3),round(y,3),round(z,3)]]).reshape(1,-1)
if nnDef.useRegressor is False:
nowsensors = nnDef.scaler.transform(nowsensors)
try:
sp[0] = nnDef.mlp.inverse_transform(clf.predict(nowsensors)[0])[0]
sp[1] = nnDef.mlp.inverse_transform(clf.predict(nowsensors)[0])[1]
except:
sp = [0,0]
print('\033[1m' + '\n Predicted classification value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),')')
prob = clf.predict_proba(nowsensors)[0].tolist()
print(' (probability = ' + str(round(100*max(prob),4)) + '%)\033[0m\n')
else:
sp = clf.predict(nowsensors)[0]
print('\033[1m' + '\n Predicted regression value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),')')
for k in range(2):
if sp[k] >= 1:
sp[k] = 1
elif sp[k] <= -1:
sp[k] = -1
else:
sp[k] = 0
print('\033[1m' + ' Predicted regression value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),') Normalized\n')
if nnDef.saveNewTrainingData is True:
with open(params.filename, "a") as sum_file:
sum_file.write('{0:.0f}\t{1:.0f}\t{2:.0f}\t{3:.0f}\t{4:.0f}\t{5:.0f}\t{6:.3f}\t{7:.3f}\t{8:.3f}\n'.format(sp[0],sp[1],l,r,c,b,x,y,z))
return sp[0], sp[1]
#*************************************************
''' Drive '''
#*************************************************
def drive(s,p):
if params.debug is False:
import piRC_lib
piRC_lib.runMotor(0,s)
piRC_lib.runMotor(1,p)
def fullStop(type):
if params.debug is False:
import piRC_lib
piRC_lib.fullStop(type)
#*************************************************
''' Lists the program usage '''
#*************************************************
def usage():
print('\n Usage:')
print('\n Training (Classifier):\n python3 piRC_ML.py -t <train file>')
print('\n Prediction (Classifier):\n python3 piRC_ML.py -r <train file>')
print('\n Training (Regression):\n python3 piRC_ML.py -t <train file> -R')
print('\n Prediction (Regression):\n python3 piRC_ML.py -r <train file> -R')
print('\n Collect data from sensors into training file:\n python3 piRC_ML.py -c')
print('\n (Separate trained models are created for regression and classification)\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
def exitProg():
fullStop(True)
sys.exit(2)
#*************************************************
''' Main initialization routine '''
#*************************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
belteshassar/cartopy | lib/cartopy/tests/mpl/test_set_extent.py | 3 | 6485 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import tempfile
from matplotlib.testing.decorators import cleanup
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import cartopy.crs as ccrs
@cleanup
def test_extents():
# tests that one can set the extents of a map in a variety of coordinate
# systems, for a variety of projections
uk = [-12.5, 4, 49, 60]
uk_crs = ccrs.Geodetic()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(uk, crs=uk_crs)
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-12.5, 49.], [4., 60.]]))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(uk, crs=uk_crs)
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-1034046.22566261, -4765889.76601514],
[333263.47741164, -3345219.0594531]])
)
# given that we know the PolarStereo coordinates of the UK, try using
# those in a PlateCarree plot
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([-1034046, 333263, -4765889, -3345219],
crs=ccrs.NorthPolarStereo())
# enable to see what is going on (and to make sure it is a plot of the uk)
# ax.coastlines()
assert_array_almost_equal(ax.viewLim.get_points(),
np.array([[-17.17698577, 48.21879707],
[5.68924381, 60.54218893]])
)
@cleanup
def test_domain_extents():
# Setting the extent to global or the domain limits.
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent((-180, 180, -90, 90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax.set_extent((-180, 180, -90, 90), ccrs.PlateCarree())
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax = plt.axes(projection=ccrs.PlateCarree(90))
ax.set_extent((-180, 180, -90, 90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax.set_extent((-180, 180, -90, 90), ccrs.PlateCarree(90))
assert_array_equal(ax.viewLim.get_points(), [[-180, -90], [180, 90]])
ax = plt.axes(projection=ccrs.OSGB())
ax.set_extent((0, 7e5, 0, 13e5), ccrs.OSGB())
assert_array_equal(ax.viewLim.get_points(), [[0, 0], [7e5, 13e5]])
def test_update_lim():
# check that the standard data lim setting works
ax = plt.axes(projection=ccrs.PlateCarree())
ax.update_datalim([(-10, -10), (-5, -5)])
assert_array_almost_equal(ax.dataLim.get_points(),
np.array([[-10., -10.], [-5., -5.]]))
plt.close()
def test_limits_contour():
xs, ys = np.meshgrid(np.linspace(250, 350, 15), np.linspace(-45, 45, 20))
data = np.sin((xs * ys) * 1.e7)
resulting_extent = np.array([[250 - 180, -45.], [-10. + 180, 45.]])
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.contourf(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.contour(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
def test_limits_pcolor():
xs, ys = np.meshgrid(np.linspace(250, 350, 15), np.linspace(-45, 45, 20))
data = (np.sin((xs * ys) * 1.e7))[:-1, :-1]
resulting_extent = np.array([[250 - 180, -45.], [-10. + 180, 45.]])
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.pcolor(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
plt.pcolormesh(xs, ys, data, transform=ccrs.PlateCarree(180))
assert_array_almost_equal(ax.dataLim, resulting_extent)
plt.close()
def test_view_lim_autoscaling():
x = np.linspace(0.12910209, 0.42141822)
y = np.linspace(0.03739792, 0.33029076)
x, y = np.meshgrid(x, y)
ax = plt.axes(projection=ccrs.RotatedPole(37.5, 357.5))
plt.scatter(x, y, x * y, transform=ccrs.PlateCarree())
expected = np.array([[86.12433701, 52.51570463],
[86.69696603, 52.86372057]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(), expected,
decimal=2)
plt.draw()
assert_array_almost_equal(ax.viewLim.frozen().get_points(), expected,
decimal=2)
ax.autoscale_view(tight=False)
expected_non_tight = np.array([[86, 52.45], [86.8, 52.9]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
expected_non_tight, decimal=1)
plt.close()
def test_view_lim_default_global():
ax = plt.axes(projection=ccrs.PlateCarree())
# The view lim should be the default unit bbox until it is drawn.
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
[[0, 0], [1, 1]])
with tempfile.TemporaryFile() as tmp:
plt.savefig(tmp)
expected = np.array([[-180, -90], [180, 90]])
assert_array_almost_equal(ax.viewLim.frozen().get_points(),
expected)
plt.close()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-3.0 |
scikit-optimize/scikit-optimize.github.io | 0.8/_downloads/365fdab27864494141feaa35987b301b/partial-dependence-plot-2D.py | 3 | 3291 | """
===========================
Partial Dependence Plots 2D
===========================
Hvass-Labs Dec 2017
Holger Nahrstaedt 2020
.. currentmodule:: skopt
Simple example to show the new 2D plots.
"""
print(__doc__)
import numpy as np
from math import exp
from skopt import gp_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_histogram, plot_objective_2D, plot_objective
from skopt.utils import point_asdict
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
dim_learning_rate = Real(name='learning_rate', low=1e-6, high=1e-2, prior='log-uniform')
dim_num_dense_layers = Integer(name='num_dense_layers', low=1, high=5)
dim_num_dense_nodes = Integer(name='num_dense_nodes', low=5, high=512)
dim_activation = Categorical(name='activation', categories=['relu', 'sigmoid'])
dimensions = [dim_learning_rate,
dim_num_dense_layers,
dim_num_dense_nodes,
dim_activation]
default_parameters = [1e-4, 1, 64, 'relu']
def model_fitness(x):
learning_rate, num_dense_layers, num_dense_nodes, activation = x
fitness = ((exp(learning_rate) - 1.0) * 1000) ** 2 + \
(num_dense_layers) ** 2 + \
(num_dense_nodes/100) ** 2
fitness *= 1.0 + 0.1 * np.random.rand()
if activation == 'sigmoid':
fitness += 10
return fitness
print(model_fitness(x=default_parameters))
#############################################################################
search_result = gp_minimize(func=model_fitness,
dimensions=dimensions,
n_calls=30,
x0=default_parameters,
random_state=123
)
print(search_result.x)
print(search_result.fun)
#############################################################################
for fitness, x in sorted(zip(search_result.func_vals, search_result.x_iters)):
print(fitness, x)
#############################################################################
space = search_result.space
print(search_result.x_iters)
search_space = {name: space[name][1] for name in space.dimension_names}
print(point_asdict(search_space, default_parameters))
#############################################################################
print("Plotting now ...")
_ = plot_histogram(result=search_result, dimension_identifier='learning_rate',
bins=20)
plt.show()
#############################################################################
_ = plot_objective_2D(result=search_result,
dimension_identifier1='learning_rate',
dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective_2D(result=search_result,
dimension_identifier1='num_dense_layers',
dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective(result=search_result,
plot_dims=['num_dense_layers',
'num_dense_nodes'])
plt.show()
| bsd-3-clause |
MMKrell/pyspace | pySPACE/missions/nodes/classification/base.py | 1 | 89554 | """ Base classes for classification """
import numpy
# import matplotlib as mpl
# mpl.rcParams['text.usetex']=True
# mpl.rcParams['text.latex.unicode']=True
import matplotlib.pyplot as plt
import os
import cPickle
import logging
import math
import numpy
import os
import timeit
import warnings
# base class
from pySPACE.missions.nodes.base_node import BaseNode
# representation of the linear classification vector
from pySPACE.missions.nodes.decorators import BooleanParameter, QNormalParameter, ChoiceParameter, QUniformParameter, \
NormalParameter, NoOptimizationParameter, LogUniformParameter, LogNormalParameter, QLogUniformParameter, \
UniformParameter
from pySPACE.resources.data_types.feature_vector import FeatureVector
# the output is a prediction vector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
@BooleanParameter("regression")
@LogUniformParameter("complexity", min_value=1e-6, max_value=1e3)
@ChoiceParameter("kernel_type", choices=["LINEAR", "POLY", "RBF", "SIGMOID"])
@QNormalParameter("offset", mu=0, sigma=1, q=1)
@UniformParameter("nu", min_value=0.01, max_value=0.99)
@LogNormalParameter("epsilon", shape=0.1 / 2, scale=0.1)
@NoOptimizationParameter("debug")
@QUniformParameter("max_time", min_value=0, max_value=3600, q=1)
@LogNormalParameter("tolerance", shape=0.001 / 2, scale=0.001)
@NoOptimizationParameter("keep_vectors")
@NoOptimizationParameter("use_list")
@NormalParameter("ratio", mu=0.5, sigma=0.5 / 2)
class RegularizedClassifierBase(BaseNode):
""" Basic class for regularized (kernel) classifiers with extra support in
the linear case
This module also implements several concepts of data handling strategies
to keep the set of training samples limited especially in an online
learning scenario. These have been used in the *Data Selection Strategies*
publication. This functionality is currently implemented for the
LibSVMClassifierNode and the SorSvmNode. It requires to replace the
*_complete_training*
**References**
========= ==============================================================
main source: Data Selection Strategies
========= ==============================================================
author Krell, M. M. and Wilshusen, N. and Ignat, A. C., and Kim, S. K.
title `Comparison of Data Selection Strategies For Online Support Vector Machine Classification <http://dx.doi.org/10.5220/0005650700590067>`_
book Proceedings of the International Congress on Neurotechnology, Electronics and Informatics
publisher SciTePress
year 2015
doi 10.5220/0005650700590067
========= ==============================================================
**Parameters**
:class_labels:
Sets the labels of the classes.
This can be done automatically, but setting it will be better,
if you want to have similar predictions values
for classifiers trained on different sets.
Otherwise this variable is built up by occurrence of labels.
Furthermore the important class (ir_class) should get the
second position in the list, such that it gets higher
prediction values by the classifier.
(*recommended, default: []*)
:complexity:
Complexity sets the weighting of punishment for misclassification
in comparison to generalizing classification from the data.
Value in the range from 0 to infinity.
(*optional, default: 1*)
:weight:
Defines an array with two entries to give different complexity
weight on the two used classes.
Set the parameter C of class i to weight*C.
(*optional, default: [1,1]*)
:kernel_type:
Defines the used kernel function.
One of the following Strings: 'LINEAR', 'POLY','RBF', 'SIGMOID'.
- LINEAR ::
u'*v
- POLY ::
(gamma*u'*v + offset)^exponent
- RBF ::
exp(-gamma*|u-v|^2)
- SIGMOID ::
tanh(gamma*u'*v + offset)
(*optional, default: 'LINEAR'*)
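As a quick numeric illustration of the RBF formula above: for two feature
vectors u and v with squared distance |u-v|^2 = 4 and gamma = 0.25, the
kernel value is exp(-0.25*4) = exp(-1), roughly 0.37.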
:exponent:
Defines parameter for the 'POLY'-kernel.
Equals parameter /degree/ in libsvm-package.
(*optional, default: 2*)
:gamma:
Defines parameter for 'POLY'-,'RBF'- and 'SIGMOID'-kernel.
In libsvm-package it was set to 1/num_features.
For RBF-Kernels we calculate it as described in:
:Paper:
A practical Approach to Model Selection for Support vector
Machines with a Gaussian Kernel
:Author: M. Varewyck and J.-P. Martens.
:Formula: 15
The quasi-optimal complexity should then be found in [0.5,2,8]
or better to say log_2 C should be found in [-1,1,3].
For testing a wider range, you may try: [-2,...,4].
A less accurate version would be to use 1/(num_features*sqrt(2)).
For the other kernels we set it to 1/num_features.
.. warning::
For the RBF-Parameter selection the
the :class:`~pySPACE.missions.nodes.postprocessing.feature_normalization.HistogramFeatureNormalizationNode`
should be used before.
(*optional, default: None*)
:offset:
Defines parameter for 'POLY'- and 'SIGMOID'-kernel.
Equals parameter /coef0/ in libsvm-package.
(*optional, default: 0*)
:nu:
Defines parameter for 'nu-SVC', 'one-class SVM' and 'nu-SVR'. It
approximates the fraction of training errors and support vectors.
Value in the range from 0 to 1.
(*optional, default: 0.5*)
:epsilon:
Defines parameter for 'epsilon-SVR'.
Set the epsilon in loss function of epsilon-SVR.
Equals parameter /p/ in libsvm-package.
(*optional, default: 0.1*)
:tolerance:
tolerance of termination criterion, same default as in libsvm.
In the SOR implementation the tolerance may be reduced to
one tenth of the complexity, if it is higher than this value.
Otherwise it would not be a valid stopping criterion.
(*optional, default: 0.001*)
:max_time:
Time for the construction of the classifier.
For LibSVM we restrict the number of steps, but for cvxopt
we use signal handling to stop processes.
This may happen when the parameters are badly chosen or
the problem matrix is too large.
Parameter is still in testing and implementation phase.
The time is given in seconds and as a default, one hour is used.
(*optional, default: 3600*)
:keep_vectors:
After training the training data is normally deleted,
except this variable is set to True.
(*optional, default: False*)
:use_list:
Switch to store samples as *list*. If set to *False* they are stored
as arrays. Used for compatibility with LIBSVM. This parameter should
not be changed by the user.
(*optional, default False*)
:multinomial:
Accept more than two classes.
(*optional, default: False*)
:add_type:
In case the classifier should be retrained, this parameter
specifies which incoming samples should be added to the training
set.
One of the following strings 'ADD_ALL', 'ONLY_MISSCLASSIFIED',
'ONLY_WITHIN_MARGIN', 'UNSUPERVISED_PROB'.
- ADD_ALL
Add all incoming samples.
- ONLY_MISSCLASSIFIED
Add only those samples that were misclassified by the current
decision function.
**References**
========= ==================================================
minor
========= ==================================================
author Bordes, Antoine and Ertekin, Seyda and Weston,
Jason and Bottou, L{\'e}on
title Fast Kernel Classifiers with Online and Active
Learning
journal J. Mach. Learn. Res.
volume 6
month dec
year 2005
issn 1532-4435
pages 1579--1619
numpages 41
publisher JMLR.org
========= ==================================================
- ONLY_WITHIN_MARGIN
Add only samples that lie within the margin of
the SVM.
**References**
========= ==================================================
main
========= ==================================================
author Bordes, Antoine and Ertekin, Seyda and Weston,
Jason and Bottou, L{\'e}on
title Fast Kernel Classifiers with Online and Active
Learning
journal J. Mach. Learn. Res.
volume 6
month dec
year 2005
issn 1532-4435
pages 1579--1619
numpages 41
publisher JMLR.org
========= ==================================================
========= ==================================================
main
========= ==================================================
author Oskoei, M.A. and Gan, J.Q. and Huosheng Hu
booktitle Engineering in Medicine and Biology Society, 2009.
EMBC 2009. Annual International Conference of the
IEEE
title Adaptive schemes applied to online SVM for BCI
data classification
year 2009
month Sept
pages 2600-2603
ISSN 1557-170X
========= ==================================================
- UNSUPERVISED_PROB
Classify the label with the current decision function and
determine how probable this decision is. If it is most likely
right, which means the probability exceeds a threshold, add the
sample to the training set.
**References**
========= ==================================================
main
========= ==================================================
author Sp{\"u}ler, Martin and Rosenstiel, Wolfgang and
Bogdan, Martin
year 2012
isbn 978-3-642-33268-5
booktitle Artificial Neural Networks and Machine Learning -
ICANN 2012
volume 7552
series Lecture Notes in Computer Science
editor Villa, AlessandroE.P. and Duch, W\lodzis\law and
\'{E}rdi, P\'{e}ter and Masulli, Francesco and
Palm, G{\"u}nther
title Adaptive SVM-Based Classification Increases
Performance of a MEG-Based Brain-Computer
Interface (BCI)
publisher Springer Berlin Heidelberg
pages 669-676
language English
========= ==================================================
(*optional, default: "ADD_ALL"*)
:discard_type:
In case the classifier should be retrained this parameter
specifies which samples from the training set should be discarded
to keep the training set small.
One of the following strings 'REMOVE_OLDEST', 'REMOVE_FARTHEST',
'REMOVE_NO_BORDER_POINTS', 'INC', 'INC_BATCH', 'CDT',
'DONT_DISCARD'.
- REMOVE_OLDEST
Remove the oldest sample from the training set.
**References**
========= ==================================================
main
========= ==================================================
title Online weighted LS-SVM for hysteretic structural
system identification
journal Engineering Structures
volume 28
number 12
pages 1728 - 1735
year 2006
issn 0141-0296
author He-Sheng Tang and Song-Tao Xue and Rong Chen and
Tadanobu Sato
========= ==================================================
========= ==================================================
minor
========= ==================================================
author Van Vaerenbergh, S. and Via, J. and Santamaria, I.
booktitle Acoustics, Speech and Signal Processing, 2006.
ICASSP 2006 Proceedings. 2006 IEEE International
Conference on
title A Sliding-Window Kernel RLS Algorithm and Its
Application to Nonlinear Channel Identification
year 2006
month May
volume 5
ISSN 1520-6149
========= ==================================================
========= ==================================================
minor
========= ==================================================
author Funaya, Hiroyuki and Nomura, Yoshihiko
and Ikeda, Kazushi
booktitle ICONIP (1)
date 2009-10-26
editor K{\"o}ppen, Mario and Kasabov, Nikola K.
and Coghill, George G.
isbn 978-3-642-02489-4
keywords dblp
pages 929-936
publisher Springer
series Lecture Notes in Computer Science
title A Support Vector Machine with Forgetting Factor
and Its Statistical Properties.
volume 5506
year 2008
========= ==================================================
========= ==================================================
minor
========= ==================================================
title On-Line One-Class Support Vector Machines. An
Application to Signal Segmentation
author Gretton, A and Desobry, F
year 2003
date 2003-04
journal IEEE ICASSP Vol. 2
pages 709--712
========= ==================================================
- INC
Don't remove any sample, but retrain the SVM/classifier
incrementally with each incoming sample.
**References**
========= ==================================================
main
========= ==================================================
year 2012
isbn 978-3-642-34155-7
booktitle Advances in Intelligent Data Analysis XI
volume 7619
series Lecture Notes in Computer Science
editor Hollm\'{e}n, Jaakko and Klawonn, Frank
and Tucker, Allan
title Batch-Incremental versus Instance-Incremental
Learning in Dynamic and Evolving Data
publisher Springer Berlin Heidelberg
author Read, Jesse and Bifet, Albert and Pfahringer,
Bernhard and Holmes, Geoff
pages 313-323
========= ==================================================
- CDT
Detect changes in the distribution of the data and adapt the
classifier accordingly, by throwing old samples away and only
taking the last few for retraining.
**References**
========= ==================================================
main
========= ==================================================
author Alippi, C. and Derong Liu and Dongbin Zhao
and Li Bu
journal Systems, Man, and Cybernetics: Systems, IEEE
Transactions on
title Detecting and Reacting to Changes in Sensing
Units: The Active Classifier Case
year 2014
month March
volume 44
number 3
pages 353-362
ISSN 2168-2216
========= ==================================================
========= ==================================================
minor
========= ==================================================
title Intelligence for embedded systems: a
methodological approach
author Cesare Alippi
publisher Springer
address Cham [u.a.]
year 2014
ISBN 978-3-319-05278-6
pages 211-247
chapter Learning in Nonstationary and Evolving
Environments
========= ==================================================
- INC_BATCH
Collect new samples until a basket size is reached. Then throw
all old samples away and retrain the classifier with the
current training set.
**References**
========= ==================================================
main
========= ==================================================
year 2012
isbn 978-3-642-34155-7
booktitle Advances in Intelligent Data Analysis XI
volume 7619
series Lecture Notes in Computer Science
editor Hollm\'{e}n, Jaakko and Klawonn, Frank
and Tucker, Allan
title Batch-Incremental versus Instance-Incremental
Learning in Dynamic and Evolving Data
publisher Springer Berlin Heidelberg
author Read, Jesse and Bifet, Albert
and Pfahringer,Bernhard and Holmes, Geoff
pages 313-323
========= ==================================================
- DONT_DISCARD
Don't remove any samples from the training set.
- REMOVE_FARTHEST
Remove that sample that is farthest away from the hyperplane.
- REMOVE_NO_BORDER_POINTS
Remove all points that are not in the border of their class.
**References**
========= ==================================================
main
========= ==================================================
title Incremental SVM based on reserved set for network
intrusion detection
journal Expert Systems with Applications
volume 38
number 6
pages 7698 - 7707
year 2011
issn 0957-4174
author Yang Yi and Jiansheng Wu and Wei Xu
========= ==================================================
(*optional, default: "REMOVE_OLDEST"*)
:keep_only_sv:
Because only the support vectors determine the decision function,
remove all other samples after the SVM is trained.
(*optional, default: False*)
:basket_size:
Specify the number of training samples for retraining.
(*optional, default: infinity*)
:relabel:
Relabel the training set after the SVM is trained.
If the parameter is set to *True*, the relabeling is done once.
Otherwise, if the parameter is set to *conv*
relabeling is repeated till convergence (with a maximum of
10 iterations over the complete training data to ensure stopping).
The maximum number of iterations is reset after each relabeling.
(*optional, default: False*)
:border_handling:
Specify how to determine border points in case the discard_type:
'REMOVE_ONLY_BORDER_POINTS' is selected.
One of the following strings 'USE_ONLY_BORDER_POINTS',
'USE_DIFFERENCE'.
- USE_ONLY_BORDER_POINTS
Keep only those points which distance to the center lie within
a specified range.
- USE_DIFFERENCE
Use the difference from the center of the class as criterion
to determine the border points of the class.
(*optional, default: USE_ONLY_BORDER_POINTS*)
:scale_factor_small:
Factor to specify the distance of the inner border to the center
of a class.
This should be smaller than *scale_factor_tall*. ::
inner border = scale_factor_small * distance between centers
(*optional, default: 0.3*)
:scale_factor_tall:
Factor to specify the distance of the outer border to the center
of a class.
This should be greater than *scale_factor_small*. ::
outer border = scale_factor_tall * distance between centers
(*optional, default: 0.5*)
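As a simple worked example of the two border formulas: assuming the class
centers are 10 units apart, the defaults scale_factor_small = 0.3 and
scale_factor_tall = 0.5 place the inner border at distance 3 and the outer
border at distance 5 from the class center, so with USE_ONLY_BORDER_POINTS
only samples whose distance to the center lies between 3 and 5 are kept
as border points.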
:p_threshold:
Probability threshold for unsupervised learning. Only data that is
most likely classified right (p > p_threshold) will be added to the
training set.
(*optional, default: 0.8*)
:cdt_threshold:
Specify a multiple of the amount of support vectors before the SVM
should be retrained anyway, regardless of whether something changed or
not.
(*optional, default: 10*)
:training_set_ratio:
Handle the ratio of the classes. One of the following strings:
"DONT_HANDLE_RATIO", "KEEP_RATIO_AS_IT_IS", "BALANCED_RATIO"
- DONT_HANDLE_RATIO
Don't handle the ratio between the classes and don't consider
the class labels of the samples.
- KEEP_RATIO_AS_IT_IS
Don't change the ratio between the classes. If a sample from one
class is added, another sample from the same class will be
removed from the training set.
- BALANCED_RATIO
Try to keep a balanced training set with just as many positive
samples as negatives.
(*optional, default: DONT_HANDLE_RATIO"*)
:u_retrain:
For the retraining, not the given label is used but it is replaced
with the prediction of the current classifier. This option is
interesting, where no true label can be provided and a fake label
is used instead. It is related to the parameter *p_threshold* and
the *relabel* parameter. The latter allows for a correction of the
possibly wrong label and the first avoids using predictions that
are too unsure.
The *retrain* parameter has to be additionally set to *True* for
this parameter to become really active.
(*optional, default: False*)
:show_plot:
Plot the samples and the decision function.
(*optional, default: False*)
:save_plot:
Save the plot of the samples and the decision function.
(*optional, default: False*)
:plot_storage:
Specify a directory to store the images of the plots.
If directory does not exists, it will be created.
(*optional, default: "./plot_storage"*)
.. note:: Not all parameter effects are implemented for all inheriting
nodes. Kernels are available for LibSVMClassifierNode and
partially for other nodes.
The *tolerance* has only an effect on Liblinear, LibSVM and SOR
classifier.
:input: FeatureVector
:output: PredictionVector
:Author: Mario Krell (mario.krell@dfki.de)
:Created: 2012/03/28
"""
def __init__(self, regression=False,
complexity=1, weight=None, kernel_type='LINEAR',
exponent=2, gamma=None, offset=0, nu=0.5, epsilon=0.1,
class_labels=None, debug=False, max_time=3600,
tolerance=0.001,
complexities_path=None,
keep_vectors=False, use_list=False,
multinomial=False,
add_type="ADD_ALL",
discard_type="REMOVE_OLDEST",
keep_only_sv=False,
basket_size=numpy.inf,
relabel=False,
border_handling="USE_ONLY_BORDER_POINTS",
scale_factor_small=0.3,
scale_factor_tall=0.5,
p_threshold=0.8,
show_plot=False,
save_plot=False,
cdt_threshold=10,
u_retrain=False,
training_set_ratio="DONT_HANDLE_RATIO",
plot_storage="./plot_storage",
ratio=0.5,
**kwargs):
super(RegularizedClassifierBase, self).__init__(**kwargs)
# type conversion
complexity = float(complexity)
if complexity < 1e-10:
self._log("Complexity (%.42f) is very small. Try rescaling data "
"or check this behavior." % complexity, level=logging.WARNING)
if self.is_retrainable() or basket_size != numpy.inf:
keep_vectors=True
if class_labels is None:
class_labels = []
if ratio < 0.01:
self._log("Ratio (%.2f) is to small. Setting to 0.01" % ratio)
ratio = 0.01
elif ratio > 0.99:
self._log("Ratio (%.2f) is to large. Setting to 0.99" % ratio)
ratio = 0.99
if weight is None:
weight = [ratio, 1 - ratio]
################ Only for printing ###########################
is_plot_active = False
scat = None
scatStandard = None
scatTarget = None
surf = None
is_retraining = False
is_trained = False
circleTarget0 = None
circleTarget1 = None
circleStandard0 = None
circleStandard1 = None
m_counter_i = 0
################# Only to store results ########################
if save_plot == True:
if show_plot == False:
plt.ioff()
# Create storage folder if it does not exists
try:
import time
plot_storage += os.path.sep + time.strftime("%d-%m-%Y_%H_%M_%S")
os.makedirs(plot_storage)
except OSError:
if os.path.exists(plot_storage):
pass # Path should already exist
else:
raise # Error on creation
################################################################
self.set_permanent_attributes(samples=None, labels=None,
future_samples=[], future_labels=[],
classes=class_labels,
weight=weight,
kernel_type=kernel_type,
complexity=complexity,
exponent=exponent, gamma=gamma,
offset=offset, nu=nu,
epsilon=epsilon, debug=debug,
tolerance=tolerance,
w=None, b=0, dim=None,
feature_names=None,
complexities_path=complexities_path,
regression=regression,
keep_vectors=keep_vectors,
max_time=max_time,
steps=0,
retraining_needed=False,
use_list=use_list,
multinomial=multinomial,
classifier_information={},
add_type=add_type,
discard_type=discard_type,
keep_only_sv=keep_only_sv,
basket_size=basket_size,
relabel=relabel,
border_handling=border_handling,
scale_factor_small=scale_factor_small,
scale_factor_tall=scale_factor_tall,
p_threshold=p_threshold,
u_retrain=u_retrain,
cdt_threshold=cdt_threshold,
training_set_ratio=training_set_ratio,
show_plot=show_plot,
save_plot=save_plot,
plot_storage=plot_storage,
scat=scat,
scatStandard=scatStandard,
scatTarget=scatTarget,
surf=surf,
is_retraining=is_retraining,
is_trained=is_trained,
# parameters for circles around
# first and second class
circleTarget0=circleTarget0,
circleTarget1=circleTarget1,
circleStandard0=circleStandard0,
circleStandard1=circleStandard1,
m_counter_i=m_counter_i,
# collection of classification scores
# for probability fits
decisions=[],
is_plot_active=is_plot_active,
)
def stop_training(self):
""" Wrapper around stop training for measuring times """
if self.samples is None or len(self.samples) == 0:
self._log("No training data given to classification node (%s), "
% self.__class__.__name__ + "wrong class labels "
+ "used or your classifier is not using samples.",
level=logging.CRITICAL)
start_time_stamp = timeit.default_timer()
super(RegularizedClassifierBase, self).stop_training()
stop_time_stamp = timeit.default_timer()
if not self.classifier_information.has_key("Training_time(classifier)"):
self.classifier_information["Training_time(classifier)"] = \
stop_time_stamp - start_time_stamp
else:
self.classifier_information["Training_time(classifier)"] += \
stop_time_stamp - start_time_stamp
def is_trainable(self):
""" Returns whether this node is trainable """
return True
def is_supervised(self):
""" Returns whether this node requires supervised training """
return True
def delete_training_data(self):
""" Check if training data can be deleted to save memory """
if not (self.keep_vectors or self.is_retrainable()):
self.samples = []
self.labels = []
self.decisions = []
def __getstate__(self):
""" Return a pickable state for this object """
odict = super(RegularizedClassifierBase, self).__getstate__()
if self.kernel_type == 'LINEAR':
# if 'labels' in odict:
# odict['labels'] = []
# if 'samples' in odict:
# odict['samples'] = []
if 'model' in odict:
del odict['model']
else:
if 'model' in odict:
del odict['model']
return odict
def store_state(self, result_dir, index=None):
""" Stores this node in the given directory *result_dir* """
if self.store and self.kernel_type == 'LINEAR':
node_dir = os.path.join(result_dir, self.__class__.__name__)
from pySPACE.tools.filesystem import create_directory
create_directory(node_dir)
try:
self.features
except:
if type(self.w) == FeatureVector:
self.features = self.w
elif self.w is not None:
self.features = FeatureVector(self.w.T, self.feature_names)
else:
self.features = None
if self.features is not None:
# This node stores the learned features
name = "%s_sp%s.pickle" % ("features", self.current_split)
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(cPickle.dumps(self.features, protocol=2))
result_file.close()
name = "%s_sp%s.yaml" % ("features", self.current_split)
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(str(self.features))
result_file.close()
del self.features
def __setstate__(self, sdict):
""" Restore object from its pickled state """
super(RegularizedClassifierBase, self).__setstate__(sdict)
if self.kernel_type != 'LINEAR':
# Retraining the svm is not a semantically clean way of restoring
            # an object, but it is by far the simplest solution
self._log("Requires retraining of the classifier")
if self.samples is not None:
self._complete_training()
def get_sensor_ranking(self):
""" Transform the classification vector to a sensor ranking
This method will fail, if the classification vector variable
``self.features`` is not existing.
This is for example the case when using nonlinear classification with
kernels.
"""
if not "features" in self.__dict__:
self.features = FeatureVector(
numpy.atleast_2d(self.w).astype(numpy.float64),
self.feature_names)
self._log("No features variable existing to create generic sensor "
"ranking in %s."%self.__class__.__name__, level=logging.ERROR)
# channel name is what comes after the first underscore
feat_channel_names = [chnames.split('_')[1]
for chnames in self.features.feature_names]
from collections import defaultdict
ranking_dict = defaultdict(float)
for i in range(len(self.features[0])):
ranking_dict[feat_channel_names[i]] += abs(self.features[0][i])
ranking = sorted(ranking_dict.items(),key=lambda t: t[1])
return ranking
def _train(self, data, class_label):
""" Add a new sample with associated label to the training set.
In case of neither incremental learning nor the
restriction of training samples is used,
add the samples to the training set.
Otherwise check whether the classifier is already trained and if so
select an appropriate training set and retrain the classifier.
If the classifier is not trained, train it when there are enough
samples available.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
"""
if not self.is_retrainable() and self.basket_size == numpy.inf:
self._train_sample(data, class_label)
else:
            # This branch should rarely be relevant: the classifier is first
            # trained once the basket size is reached, and after that first
            # training only inc_train should adapt the classifier, no matter
            # how many samples are in the training set.
if self.samples is not None and self.is_trained:
self.adapt_training_set(data, class_label)
else:
self._train_sample(data, class_label)
if self.show_plot or self.save_plot:
plt.clf()
if len(self.samples) >= self.basket_size:
if not self.is_trained:
self._complete_training()
if self.discard_type == "CDT":
self.learn_CDT()
self.is_trained = True
def _train_sample(self, data, class_label):
""" Train the classifier on the given data sample
It is assumed that the class_label parameter
contains information about the true class the data belongs to.
:param data: A new sample for the training set.
:type data: FeatureVector
:param class_label: The label of the new sample.
:type class_label: str.
"""
if self.feature_names is None:
try:
self.feature_names = data.feature_names
except AttributeError as e:
warnings.warn("Use a feature generator node before a " +
"classification node.")
raise e
if self.dim is None:
self.dim = data.shape[1]
if self.samples is None:
self.samples = []
# self.decision is by default set to empty list
if self.add_type == "UNSUPERVISED_PROB":
self.decisions = []
if self.labels is None:
self.labels = []
if self.discard_type == "INC_BATCH":
self.future_samples = []
self.future_labels = []
if class_label not in self.classes and "REST" not in self.classes and \
not self.regression:
warnings.warn(
"Please give the expected classes to the classifier! " +
"%s unknown. " % class_label +
"Therefore define the variable 'class_labels' in " +
"your spec file, where you use your classifier. " +
"For further info look at the node documentation.")
if self.multinomial or not(len(self.classes) == 2):
self.classes.append(class_label)
self.set_permanent_attributes(classes=self.classes)
# main step of appending data to the list *self.samples*
if class_label in self.classes or self.regression:
self.append_sample(data)
if not self.regression and class_label in self.classes:
self.labels.append(self.classes.index(class_label))
elif not self.regression and "REST" in self.classes:
self.labels.append(self.classes.index("REST"))
elif self.regression: # regression!
try:
self.labels.append(float(class_label))
except ValueError: # one-class-classification is regression-like
self.labels.append(1)
else: # case, where data is irrelevant
pass
def train(self, data, label):
""" Special mapping for multi-class classification
It enables label filtering for one vs. REST and one vs. one case.
Furthermore, the method measures time for the training segments.
"""
# one vs. REST case
if "REST" in self.classes and not label in self.classes:
label = "REST"
# one vs. one case
if not self.multinomial and len(self.classes) == 2 and \
not label in self.classes:
return
start_time_stamp = timeit.default_timer()
super(RegularizedClassifierBase, self).train(data, label)
stop_time_stamp = timeit.default_timer()
if not self.classifier_information.has_key("Training_time(classifier)"):
self.classifier_information["Training_time(classifier)"] = \
stop_time_stamp - start_time_stamp
else:
self.classifier_information["Training_time(classifier)"] += \
stop_time_stamp - start_time_stamp
def append_sample(self, sample):
""" Some methods need a list of arrays as lists and some prefer arrays
"""
data_array = sample.view(numpy.ndarray)
if self.use_list:
self.samples.append(map(float, list(data_array[0, :])))
else:
self.samples.append(data_array[0, :])
def _execute(self, x):
""" Executes the classifier on the given data vector in the linear case
prediction value = <w,data>+b
"""
if self.kernel_type == 'LINEAR':
data = x.view(numpy.ndarray)
# Let the SVM classify the given data: <w,data>+b
if self.w is None:
prediction_value = 0
self.w = numpy.zeros(x.shape[1])
else:
prediction_value = float(numpy.dot(self.w.T, data[0, :]))+self.b
# one-class multinomial handling of REST class
if "REST" in self.classes and self.multinomial:
if "REST" == self.classes[0]:
label = self.classes[1]
elif "REST" == self.classes[1]:
label = self.classes[0]
prediction_value *= -1
# Look up class label
# prediction_value --> {-1,1} --> {0,1} --> Labels
elif prediction_value > 0:
label = self.classes[1]
else:
label = self.classes[0]
return PredictionVector(label=label, prediction=prediction_value,
predictor=self)
def get_own_transformation(self, sample=None):
""" Use classification function e.g. for visualization in LINEAR case
"""
if self.kernel_type == 'LINEAR':
return self.w, self.b, self.feature_names, "linear classifier"
def _inc_train(self, data, class_label=None):
""" Manipulation of training set for updating the svm """
#######################################################################
if not self.classifier_information.has_key("Inc_iterations"):
self.classifier_information["Inc_iterations"] = 1
else:
self.classifier_information["Inc_iterations"] += 1
if self.u_retrain:
class_label = self._execute(data).label
start_time_stamp = timeit.default_timer()
#######################################################################
self.adapt_training_set(data, class_label)
#######################################################################
stop_time_stamp = timeit.default_timer()
if not self.classifier_information.has_key("Retraining_time(classifier)"):
self.classifier_information["Retraining_time(classifier)"] = \
stop_time_stamp - start_time_stamp
else:
self.classifier_information["Retraining_time(classifier)"] += \
stop_time_stamp - start_time_stamp
#######################################################################
    def _batch_retrain(self, data_list, label_list):
        """ Simply add the new data to the old one and retrain """
        for i in range(len(label_list)):
            self._train(data_list[i], label_list[i])
# # retraining is now performed in the train method, since method
# # needs to be retrainable to call _batch_retrain
# self.stop_training()
def print_variables(self):
""" Debug function for printing the classifier and the slack variables
"""
# Precision does not work here because of the strange dtype.
numpy.set_printoptions(edgeitems=50, precision=4, suppress=False,
threshold=50)
# ...Setting the dtype to list doesn't work either.
print self.print_w
print 'This is the classification vector w and b=', self.b, '.'
print self.num_retained_features, ' out of ', self.dim, \
' features have been used.'
print self.num_sv, " vectors of ", self.num_samples, " have been used."
# print self.t, "are the Slack variables."
if not((numpy.array(self.t) >= 0).all()):
print "There are negative slack variables! Classification failed?"
print "%i vectors of %i have been used for the inner margin and" \
% (self.inner_margin, self.num_samples)
numpy.set_printoptions(edgeitems=100, linewidth=75, precision=5,
suppress=True, threshold=1000)
print numpy.array(self.ti), "are the inner Slack variables."
numpy.set_printoptions(edgeitems=3, infstr='Inf', linewidth=75,
nanstr='NaN', precision=8, suppress=False,
threshold=1000)
def kernel_func(self, u, v):
""" Returns the kernel function applied on x and y
- POLY ::
(gamma*u'*v + offset)^exponent
- RBF ::
exp(-gamma*|u-v|^2)
- SIGMOID ::
tanh(gamma*u'*v + offset)
"""
if not self.kernel_type == "LINEAR" and self.gamma is None:
self.calculate_gamma()
if self.kernel_type == "LINEAR":
return float(numpy.dot(u, v))
elif self.kernel_type == "POLY":
h = float(numpy.dot(u, v))
return (self.gamma*h+self.offset)**self.exponent
elif self.kernel_type == "RBF":
return numpy.exp(-self.gamma*float(numpy.sum((u - v)**2)))
elif self.kernel_type == "SIGMOID":
h = float(numpy.dot(u, v))
return numpy.tanh(self.gamma * h + self.offset)
elif self.kernel_type.startswith("lambda "):
function = eval(self.kernel_type)
return float(function(u, v))
def calculate_gamma(self):
""" Calculate default gamma
This defines a parameter for 'POLY'-,'RBF'- and 'SIGMOID'-kernel.
We calculate the parameter `gamma` as described in the base node
description.
"""
if (self.kernel_type == 'POLY' or self.kernel_type == 'SIGMOID') \
and self.gamma is None:
self.gamma = 1.0 / self.dim
elif self.kernel_type == 'RBF' and self.gamma is None and \
not self.regression:
a = self.labels.count(self.classes.index(self.classes[0]))
b = self.labels.count(self.classes.index(self.classes[1]))
if a > b:
relevant = 1
else:
relevant = 0
relevant_samples = []
for i, label in enumerate(self.labels):
if label == relevant:
relevant_samples.append(self.samples[i])
variance = numpy.median(numpy.var(numpy.array(self.samples),
axis=0))
self.gamma = 0.5/(variance*self.dim)
self._log(
"No parameter gamma specified for the kernel. Using: %f."\
% self.gamma,
level=logging.WARNING)
elif self.gamma is None:
self.gamma = 0.001
def adapt_training_set(self, data, class_label=None):
""" Select the samples that should belong to the training set and
retrain the classifier.
For incremental training run through four steps.
1) Add samples to the training set according to some criteria.
2) Discard samples from the training set according to some criteria.
3) Retrain the classifier with the current training set.
4) If used relabel the training set according to the current
decision function.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
"""
if (self.show_plot or self.save_plot) and self.is_plot_active == False:
if self.show_plot:
plt.ion()
plt.grid(True)
if self.show_plot:
plt.show()
self.is_plot_active = True
# In case initial training is not already performed, train
# classifier and CDT once.
if self.is_trained == False and self.discard_type != "INC":
self._complete_training()
if self.discard_type == "CDT":
self.learn_CDT()
self.is_trained = True
# specify flag for retraining phase
self.is_retraining = True
########################################################################
# 1) Selection of new data #
########################################################################
[new_data_in_training_set, retraining_required, label] =\
self.select_new_data(data, class_label)
########################################################################
# 2) Discard data #
########################################################################
[new_data_in_training_set, retraining_required] =\
self.discard_data(data, class_label,\
new_data_in_training_set, retraining_required,\
label)
########################################################################
# 3) Retrain #
########################################################################
self.retrain(data, class_label,\
new_data_in_training_set, retraining_required)
########################################################################
# 4) Relabel training set #
########################################################################
self.relabel_training_set()
if self.show_plot or self.save_plot:
self.num_samples = len(self.samples)
self.visualize()
def select_new_data(self, data, class_label):
""" Add the new sample to the training set if it satisfies some
criteria.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:rtype: [flag if new data is in training set, flag if retraining is
required (the new point is a potential sv or a removed
one was a sv)]
"""
ret_label = None
retraining_required = False
new_data_in_training_set = False
if self.add_type == "ONLY_MISSCLASSIFIED":
# get the prediction for the current data
predictionVec = self._execute(data)
# only append misclassified data points to training set
if predictionVec.label != class_label:
if self.discard_type != "INC":
self.add_new_sample(data, class_label)
ret_label = self.classes.index(class_label)
# no need to check if potential support vector
# already confirmed with criteria
retraining_required = True
else:
new_data_in_training_set = True
elif self.add_type == "ADD_ALL":
            # add all incoming samples
if self.discard_type != "INC":
self.add_new_sample(data, class_label)
ret_label = self.classes.index(class_label)
retraining_required =\
self.is_potential_support_vector(data, class_label)
else:
new_data_in_training_set = True
elif self.add_type == "ONLY_WITHIN_MARGIN":
# append only samples that are within the margin
# (not on the other side of own border, but really between those
# borderlines)
predictionVec = self._execute(data)
if abs(predictionVec.prediction) < 1.0:
if self.discard_type != "INC":
self.add_new_sample(data, class_label)
ret_label = self.classes.index(class_label)
retraining_required =\
self.is_potential_support_vector(data, class_label)
else:
new_data_in_training_set = True
elif self.add_type == "UNSUPERVISED_PROB":
# unsupervised classification
# only append those samples that were most probably right
# classified
for i in numpy.arange(len(self.decisions), self.num_samples):
predictionVec = self._execute(\
numpy.atleast_2d(self.samples[i]))
self.decisions.append(predictionVec.prediction)
# get number of target and standard samples
prior1 = sum(map(lambda x: x == 1, self.labels))
prior0 = self.num_samples - prior1
# get labels as list of trues and falses
labels = map(lambda x: x == 1, self.labels)
# calculate the label and the probability for the label of the
# given data
[p, label] = self.get_platt_prob(self.decisions,
labels,
prior1, prior0,
data)
if p > self.p_threshold:
self.decisions.append(p)
if self.discard_type != "INC":
self.add_new_sample(data, label)
ret_label = self.classes.index(label)
retraining_required =\
self.is_potential_support_vector(data, label)
else:
new_data_in_training_set = True
return [new_data_in_training_set, retraining_required, ret_label]
def discard_data(self, data, class_label,\
new_data_in_training_set, retraining_required,
label=None):
""" Discard data from training set according to some criteria.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:param new_data_in_training_set: flag if new data is in training set
:type new_data_in_training_set: bool
:param retraining_required: flag if retraining is
            required (the new point is a potential sv or a removed
one was a sv)
:type retraining_required: bool
:rtype: [flag if new data is in training set, flag if retraining is
                 required (the new point is a potential sv or a removed
one was a sv)]
"""
# Reset retraining_required flag if a new chunk is not full
if self.discard_type == "INC_BATCH"\
and len(self.future_samples) < self.basket_size:
retraining_required = False
while self.num_samples > self.basket_size\
and (self.discard_type=="REMOVE_OLDEST"\
or self.discard_type=="REMOVE_FARTHEST"):
if self.discard_type == "REMOVE_OLDEST":
# remove the oldest sample
                idx = 0  # in case "DONT_HANDLE_RATIO"
if self.training_set_ratio == "KEEP_RATIO_AS_IT_IS":
# choose from the training set the oldest sample with the
# same label as the added sample
idx = next((i for i in numpy.arange(len(self.samples))\
if self.labels[i] == label), 0)
elif self.training_set_ratio == "BALANCED_RATIO":
# try to keep the number of samples for each class equal
num_target = sum(l == 1 for l in self.labels)
num_standard = sum(l == 0 for l in self.labels)
if num_target != num_standard:
label = (num_target > num_standard)
idx = next((i for i in numpy.arange(len(self.samples))\
if self.labels[i] == label), 0)
retraining_required = self.remove_samples([idx])\
or retraining_required
elif self.discard_type == "REMOVE_FARTHEST":
                # remove the sample whose distance to the hyperplane
                # is maximal
samples = self.samples # in case "DONT_HANDLE_RATIO"
if self.training_set_ratio == "KEEP_RATIO_AS_IT_IS":
# choose only from that samples with the same label as the
# added sample
samples = []
idxs_label = []
for i in numpy.arange(len(self.samples)):
if self.labels[i] == label:
samples.append(self.samples[i])
idxs_label.append(i)
elif self.training_set_ratio == "BALANCED_RATIO":
# try to keep the number of samples for each class equal
num_target = sum(l == 1 for l in self.labels)
num_standard = sum(l == 0 for l in self.labels)
if num_target != num_standard:
label = (num_target > num_standard)
samples = []
idxs_label = []
for i in numpy.arange(len(self.samples)):
if self.labels[i] == label:
samples.append(self.samples[i])
idxs_label.append(i)
idx = numpy.argmax(map(\
lambda x: abs((self._execute(\
numpy.atleast_2d(x))).prediction),\
samples))
if self.training_set_ratio == "KEEP_RATIO_AS_IT_IS" or\
self.training_set_ratio == "BALANCED_RATIO":
idx = idxs_label[idx]
retraining_required = self.remove_samples([idx])\
or retraining_required
# TODO: add parameter to specify possible overlap
# like x times basket size?
if self.discard_type == "INC_BATCH"\
and len(self.future_samples) == self.basket_size:
# remove all old samples
self.remove_samples(list(numpy.arange(self.num_samples)))
# and add all samples from the future knowledge base
for (d, c_l) in zip(self.future_samples, self.future_labels):
self.add_new_sample(d, c_l, True)
# The whole training set changes so retraining is required
retraining_required = True
if self.discard_type == "REMOVE_NO_BORDER_POINTS":
if len(self.samples) < self.basket_size:
# Don't retrain if basket size is not reached
retraining_required = False
elif len(self.samples) == self.basket_size:
# Retrain if basket size is reached
retraining_required = True
if len(self.samples) > self.basket_size:
# Discard useless data for next iterations
self.remove_no_border_points(retraining_required)
retraining_required = False
if self.discard_type == "CDT":
# test if a change occurred
changeDetected = self.change_detection_test(data, class_label)
# if a change is detected remove old samples
if changeDetected or (numpy.floor_divide(
len(self.future_samples),
self.num_samples) > self.cdt_threshold):
self.remove_samples(numpy.arange(len(self.samples)))
# if a change is detected or many new samples arrived add
# current samples to training set
for (s, l) in zip(self.future_samples, self.future_labels):
self.add_new_sample(s, l, True)
retraining_required = True
else:
retraining_required = False
return [new_data_in_training_set, retraining_required]
def retrain(self, data, class_label,
new_data_in_training_set, retraining_required):
""" Start retraining procedure if the training set changed.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:param new_data_in_training_set: flag if new data is in training set
:type new_data_in_training_set: bool
:param retraining_required: flag if retraining is
required (the new point is a potential sv or a removed
one was a sv)
"""
if self.classifier_information.has_key("Inc_iterations") and\
self.classifier_information["Inc_iterations"] == 1:
self.classifier_information["Retrain_counter"] = 0
if self.discard_type == "INC" and new_data_in_training_set is True:
# Incremental training
self.incremental_training(data, class_label)
if not self.classifier_information.has_key("Retrain_counter"):
self.classifier_information["Retrain_counter"] = 1
else:
self.classifier_information["Retrain_counter"] += 1
else:
if retraining_required:
# retrain the svm
self.retrain_SVM()
if not self.classifier_information.has_key("Retrain_counter"):
self.classifier_information["Retrain_counter"] = 1
else:
self.classifier_information["Retrain_counter"] += 1
if (self.keep_only_sv or
# approaches, where data is removed later on
self.discard_type == "CDT" or
self.discard_type == "INC_BATCH"):
# only keep the sv to save memory
self.remove_non_support_vectors()
def relabel_training_set(self):
""" Relabel the training set according to the current decision function.
"""
iterations = 1
while self.relabel:
changed = False
# relabel all training samples according to
# current decision function
for i in numpy.arange(len(self.samples)):
predictionVec = self._execute(numpy.atleast_2d(self.samples[i]))
if self.labels[i] != self.classes.index(predictionVec.label):
changed = True
self.labels[i] = self.classes.index(predictionVec.label)
# only relevant for SOR classification (outsourcing?)
if "version" in self.__dict__:
for i in range(self.num_samples):
if self.version == "matrix":
self.M[-1, i] *= -1
self.M[i, -1] *= -1
# modified from *calculate_weigts_and_class_factors*
if self.version in ["samples", "matrix"]:
self.bi[i] *= -1
self.ci[i] = self.complexity * \
self.weight[self.labels[i]]
if i < len(self.decisions):
self.decisions[i] = predictionVec.prediction
else:
self.decisions.append(predictionVec.prediction)
if not changed:
break
else: # Retrain the svm with the relabeled training set
self.retrain_SVM()
if not self.relabel == "conv" or iterations >= 10:
break
iterations += 1
def is_potential_support_vector(self, data, class_label=None):
""" Check whether the given data could become a support vector
        This is the case when the data lies within, on, or on the other
        side of the margin.
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
"""
predictionVec = self._execute(data)
if class_label is not None:
if self.classes.index(class_label) == 1:
return predictionVec.prediction <= 1.0
else:
return predictionVec.prediction >= -1.0
else:
return True
def remove_no_border_points(self, retraining_required):
""" Discard method to remove all samples from the training set that are
not in the border of their class.
The border is determined by a minimum distance from the center of
the class and a maximum distance.
:param retraining_required: flag if retraining is
required (the new point is a potential sv or a removed
one was a sv)
"""
raise NotImplementedError(
"The node %s does not implement a border point handling." \
% self.__class__.__name__)
def add_new_sample(self, data, class_label=None, default=False):
""" Add a new sample to the training set
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:param default: Specifies if the sample is added to the current
training set or to a future training set
:param default: bool
"""
raise NotImplementedError(
"The node %s does not implement a add sample routine." \
% self.__class__.__name__)
def remove_samples(self, idxs):
""" Remove the samples at the given indices from the training set
:param: idxs: Indices of the samples to remove.
:type: idxs: list of int
:rtype: bool - True if a support vector was removed.
"""
raise NotImplementedError(
"The node %s does not implement a remove sample routine." \
% self.__class__.__name__)
def remove_non_support_vectors(self):
""" Remove all samples that are no support vectors """
raise NotImplementedError(
"The node %s does not implement a remove SVs routine." \
% self.__class__.__name__)
def retrain_SVM(self):
""" Retrain the svm with the current training set """
# start retraining process
self._complete_training()
self.future_samples = []
self.future_labels = []
if self.discard_type == "CDT":
self.learn_CDT()
def incremental_training(self, data, class_label):
""" Warm Start Implementation by Mario Michael Krell
The saved status of the algorithm, including the Matrix M, is used
as a starting point for the iteration.
Only the problem has to be lifted up one dimension.
"""
raise NotImplementedError(
"The node %s does not implement incremental training." \
% self.__class__.__name__)
def learn_CDT(self):
""" Learn features of the training set to detect changes in the
underlying distribution
"""
raise NotImplementedError(
"The node %s does not implement a CDT." % self.__class__.__name__)
def change_detection_test(self, data, class_label=None):
""" Detect a change of the distribution
:param data: A new sample for the training set.
:type data: list of float
:param class_label: The label of the new sample.
:type class_label: str
:rtype: bool - If change detected return True
"""
raise NotImplementedError(
"The node %s does not implement a change detection test." \
% self.__class__.__name__)
def get_platt_prob(self, deci, label, prior1, prior0, data):
""" Get a probability for the decision of the svm
:param deci: List of decision made for each sample.
:type deci: list of float
:param label: List of labels from the previous samples.
:type label: list of bool
:param prior1: Number of samples of class 1
:type prior1: int
:param prior0: Number of samples of class 0
:type prior0: int
:param data: Sample under investigation
:type data: list of float
:rtype: [float, int] - probability and the corresponding label
"""
[A, B] = self.approximate_AB_for_plat_prob(deci, label, prior1, prior0)
predictionVec = self._execute(data)
f = predictionVec.prediction
fApB = f * A + B
if fApB >= 0:
p = numpy.exp(-fApB) / (1.0 + numpy.exp(-fApB))
else:
p = 1.0 / (1.0 + numpy.exp(fApB))
if self.classes.index(predictionVec.label) == 1:
return [p, predictionVec.label]
else:
return [1-p, predictionVec.label]
def approximate_AB_for_plat_prob(self, deci, label, prior1, prior0):
""" Approximate the distribution of both classes
:param deci: List of decision made for each sample.
:type deci: list of float
:param label: List of labels from the previous samples.
:type label: list of bool
:param prior1: Number of samples of class 1
:type prior1: int
:param prior0: Number of samples of class 0
:type prior0: int
:rtype: [float, float] - ([A, B] - parameters of sigmoid)
"""
# Parameter setting
maxiter = 100
# Maximum number of iterations
minstep = 1e-10
# Minimum step taken in line search
sigma = 1e-12
# Set to any value > 0
# Construct initial values: target support in array t,
# initial function value in fval
hiTarget = (prior1 + 1.0) / (prior1 + 2.0)
loTarget = 1 / (prior0 + 2.0)
length = prior1 + prior0 # Total number of data
t = numpy.zeros(length)
for i in numpy.arange(length):
if label[i] > 0:
t[i] = hiTarget
else:
t[i] = loTarget
A = 0.0
B = numpy.log((prior0 + 1.0) / (prior1 + 1.0))
fval = 0.0
for i in numpy.arange(length):
fApB = deci[i] * A + B
if fApB >= 0:
fval += t[i] * fApB + numpy.log(1 + numpy.exp(-fApB))
else:
fval += (t[i] - 1) * fApB + numpy.log(1 + numpy.exp(fApB))
for it in numpy.arange(maxiter):
# Update Gradient and Hessian (use H' = H + sigma 1)
h11 = h22 = sigma
h21 = g1 = g2 = 0.0
for i in numpy.arange(length):
fApB = deci[i] * A + B
if fApB >= 0:
p = numpy.exp(-fApB) / (1.0 + numpy.exp(-fApB))
q = 1.0 / (1.0 + numpy.exp(-fApB))
else:
p = 1.0 / (1.0 + numpy.exp(fApB))
q = numpy.exp(fApB) / (1.0 + numpy.exp(fApB))
d2 = p * q
h11 += deci[i] * deci[i] * d2
h22 += d2
h21 += deci[i] * d2
d1 = t[i] - p
g1 += deci[i] * d1
g2 += d1
if abs(g1) < 1e-5 and abs(g2) < 1e-5: # Stopping criteria
break
# Compute modified Newton directions
det = h11 * h22 - h21 * h21
dA = -(h22 * g1 - h21 * g2) / det
dB = -(-h21 * g1 + h11 * g2) / det
gd = g1 * dA + g2 * dB
stepsize = 1
while stepsize >= minstep: # Line search
newA = A + stepsize * dA
newB = B + stepsize * dB
newf = 0.0
for i in numpy.arange(length):
fApB = deci[i] * newA + newB
if fApB >= 0:
newf += t[i] * fApB + numpy.log(1 + numpy.exp(-fApB))
else:
newf += (t[i] - 1) * fApB + \
numpy.log(1 + numpy.exp(fApB))
if newf < fval + 0.0001 * stepsize * gd:
A = newA
B = newB
fval = newf
break # Sufficient decrease satisfied
else:
stepsize /= 2.0
if stepsize < minstep:
self._log(
"Line search fails. A= " + str(A) + " B= " + str(B),
level=logging.WARNING)
break
if it >= maxiter:
self._log("Reaching maximal iterations", level=logging.WARNING)
return [A, B]
# ------------------------------------------------------------------------------
# plot routines
# ------------------------------------------------------------------------------
def __intersect(self, rect, line):
""" Calculate the points of a line in a given rectangle
:param rect: Parameters of a rectangle (min x, min y, max x, max y).
:type rect: list of float
:param line: line given as y=a*x+b or a*x+b*y+c=0
:type line: list of float
:rtype: list of pairs of float
"""
l = []
xmin, xmax, ymin, ymax = rect
a, b, c = line
assert a != 0 or b != 0
if a == 0:
y = -c/b
if y <= ymax and y >= ymin:
l.append((xmin, y))
l.append((xmax, y))
return l
if b == 0:
x = -c/a
if x <= xmax and x >= xmin:
l.append((x, ymin))
l.append((x, ymax))
return l
k = -a / b
m = -c / b
for x in (xmin, xmax):
y = k * x + m
if y <= ymax and y >= ymin:
l.append((x,y))
k = -b / a
m = -c / a
for y in (ymin, ymax):
x = k * y + m
if x < xmax and x > xmin:
l.append((x, y))
return l
def plot_line(self, coef, *args, **kwargs):
""" Plot a line (y=a*x+b or a*x+b*y+c=0) with the given coefficients
:param coef: Coefficients determining the line
:type coef: list of floats
:rtype: list of lines
"""
coef = numpy.float64(coef[:])
assert len(coef) == 2 or len(coef) == 3
if len(coef) == 2:
a, b, c = coef[0], -1., coef[1]
elif len(coef) == 3:
a, b, c = coef
ax = plt.gca()
limits = ax.axis()
points = self.__intersect(limits, (a,b,c))
if len(points) == 2:
pts = numpy.array(points)
l = ax.plot(pts[:, 0], pts[:, 1], *args, **kwargs)
ax.axis(limits)
return l
return None
def circle_out(self, x, y, s=20, *args, **kwargs):
""" Circle out points with size 's'.
:param x: x coordinates.
:type x: list of float
:param y: y coordinates.
:type y: list of float
:param s: Size of circle
        :type s: int
"""
ax = plt.gca()
x = [item for sublist in x for item in sublist]
y = [item for sublist in y for item in sublist]
if 'edgecolors' not in kwargs:
kwargs['edgecolors'] = 'g'
self.scat = ax.scatter(x, y, s, facecolors='none', *args, **kwargs)
def plot_data(self, x, y, target, s=20, *args, **kwargs):
""" Plot points with size 's'
:param x: x coordinates.
:type x: list of float
:param y: y coordinates.
:type y: list of float
:param target: Determine class label.
:type target: bool
:param s: Size of point.
:type s: int
"""
ax = plt.gca()
x = [item for sublist in x for item in sublist]
y = [item for sublist in y for item in sublist]
if 'edgecolors' not in kwargs:
if target == True:
kwargs['edgecolors'] = 'r'
self.scatTarget = ax.scatter(x, y, s, marker='x',\
facecolors='none',\
*args, **kwargs)
else:
kwargs['edgecolors'] = 'b'
self.scatStandard = ax.scatter(x, y, s, marker='o',\
facecolors='none',\
*args, **kwargs)
def plot_hyperplane(self):
""" Plot the hyperplane (in 2D a line).
"""
ax = plt.gca()
ax.set_title("$wx + b = 0$\n$[%.4f; %.4f]x + %.4f = 0$"\
% (self.w[0], self.w[1], self.b))
coef = [self.w[0], self.w[1], self.b]
coef1 = coef[:]
coef2 = coef[:]
coef1[2] += 1
coef2[2] -= 1
i = 0
for _, line in enumerate(ax.lines):
ax.lines.remove(line)
i += 1
if i != 3:
if self.show_plot:
from time import sleep
sleep(0.25)
for _, line in enumerate(ax.lines):
ax.lines.remove(line)
i += 1
self.plot_line(coef, 'b', lw=2)
self.plot_line(coef1, 'g', lw=1, ls='dashed')
self.plot_line(coef2, 'r', lw=1, ls='dashed')
def plot_samples(self):
""" Plot all training samples.
Plot all training samples and mark the class association.
"""
class_neg = []
class_pos = []
for idx in numpy.arange(self.num_samples):
if self.labels[idx] == 0:
class_neg.append(self.samples[idx])
else:
class_pos.append(self.samples[idx])
class_neg = numpy.matrix(class_neg)
class_pos = numpy.matrix(class_pos)
if self.scatStandard is not None:
self.scatStandard.remove()
self.scatStandard = None
if self.scatTarget is not None:
self.scatTarget.remove()
self.scatTarget = None
# TODO: determine size of plot
xmin = -2.5 #min(numpy.min(class_neg[:,0]), numpy.min(class_pos[:,0]))
xmax = 2.5 #max(numpy.max(class_neg[:,0]), numpy.max(class_pos[:,0]))
ymin = -2.5 #min(numpy.min(class_neg[:,1]), numpy.min(class_pos[:,1]))
ymax = 2.5 #max(numpy.max(class_neg[:,1]), numpy.max(class_pos[:,1]))
ax = plt.gca()
ax.axis([xmin-1.0, xmax+1.0, ymin-1.0, ymax+1.0])
if numpy.shape(class_neg)[1] > 0:
self.plot_data(class_neg[:, 0], class_neg[:, 1], False)
if numpy.shape(class_pos)[1] > 0:
self.plot_data(class_pos[:, 0], class_pos[:, 1], True)
def plot_support_vectors(self):
""" Mark the support vectors by a circle.
"""
support_vectors = []
for idx in numpy.arange(self.num_samples):
if self.dual_solution[idx] != 0:
support_vectors.append(self.samples[idx])
support_vectors = numpy.matrix(support_vectors)
if self.scat is not None:
self.scat.remove()
if support_vectors is not None and\
numpy.shape(support_vectors)[0] > 1 and\
numpy.shape(support_vectors)[1] > 0:
self.circle_out(support_vectors[:, 0], support_vectors[:, 1], s=100)
else:
self.scat = None
def plot_class_borders(self, mStandard, mTarget, R,
scaleFactorSmall, scaleFactorTall):
""" Plot the borders of each class.
:param mStandard: Center of standard class.
:type mStandard: [float, float] - (x,y)
:param mTarget: Center of target class.
:type mTarget: [float, float] - (x,y)
:param R: Distance between both centers.
:type R: float
:param scaleFactorSmall: Determine inner circle of class border.
:type scaleFactorSmall: float
:param scaleFactorTall: Determine outer circle of class border.
:type scaleFactorTall: float
"""
ax = plt.gca()
if self.circleStandard0 is not None:
self.circleStandard0.remove()
if self.circleStandard1 is not None:
self.circleStandard1.remove()
if self.circleTarget0 is not None:
self.circleTarget0.remove()
if self.circleTarget1 is not None:
self.circleTarget1.remove()
self.circleStandard0 = plt.Circle(
mStandard, radius=scaleFactorSmall * R, color='b', fill=False)
self.circleStandard1 = plt.Circle(
mStandard, radius=scaleFactorTall * R, color='b', fill=False)
self.circleTarget0 = plt.Circle(
mTarget, radius=scaleFactorSmall * R, color='r', fill=False)
self.circleTarget1 = plt.Circle(
mTarget, radius=scaleFactorTall * R, color='r', fill=False)
ax.add_patch(self.circleStandard0)
ax.add_patch(self.circleStandard1)
ax.add_patch(self.circleTarget0)
ax.add_patch(self.circleTarget1)
def plot_data_3D(self, x, y, z, target, s=20, *args, **kwargs):
""" Plot points with size 's'
:param x: x coordinates.
:type x: list of float
:param y: y coordinates.
:type y: list of float
:param z: z coordinates:
:type z: list of float
:param target: Determine class label.
:type target: bool
:param s: Size of point.
:type s: int
"""
ax = plt.gca(projection='3d')
x = [item for sublist in x for item in sublist]
y = [item for sublist in y for item in sublist]
z = [item for sublist in z for item in sublist]
if 'edgecolors' not in kwargs:
if target:
self.scatTarget = ax.scatter(x, y, z, c='r', marker='o')
else:
self.scatStandard = ax.scatter(x, y, z, c='g', marker='x')
def plot_samples_3D(self):
""" Plot all training samples.
Plot all training samples and mark the class association.
"""
ax = plt.gca(projection='3d')#generate 3d plot
# TODO: determine size of plot
xmin = -2.5 # min(numpy.min(class_neg[:,0]), numpy.min(class_pos[:,0]))
xmax = 2.5 # max(numpy.max(class_neg[:,0]), numpy.max(class_pos[:,0]))
ymin = -2.5 # min(numpy.min(class_neg[:,1]), numpy.min(class_pos[:,1]))
ymax = 2.5 # max(numpy.max(class_neg[:,1]), numpy.max(class_pos[:,1]))
zmin = -2.5
zmax = 2.5
ax.set_xlim3d(xmin, xmax)
ax.set_ylim3d(ymin, ymax)
ax.set_zlim3d(zmin, zmax)
class_neg = []
class_pos = []
for idx in numpy.arange(self.num_samples):
if self.labels[idx] == 0:
class_neg.append(self.samples[idx])
else:
class_pos.append(self.samples[idx])
class_neg = numpy.matrix(class_neg)
class_pos = numpy.matrix(class_pos)
if self.scatStandard is not None:
self.scatStandard.remove()
self.scatStandard = None
if self.scatTarget is not None:
self.scatTarget.remove()
self.scatTarget = None
if numpy.shape(class_neg)[1] > 0:
self.plot_data_3D(
class_neg[:, 0], class_neg[:, 1], class_neg[:, 2], False)
if numpy.shape(class_pos)[1] > 0:
self.plot_data_3D(
class_pos[:, 0], class_pos[:, 1], class_pos[:, 2], True)
def plot_hyperplane_3D(self):
""" Plot the hyperplane (in 3D a surface).
"""
ax = plt.gca(projection='3d')
ax.set_title("$wx + b = 0$\n$[%.4f; %.4f; %.4f]x + %.4f = 0$"\
% (self.w[0], self.w[1], self.w[2], self.b))
if self.surf is not None:
self.surf.remove()
self.surf = None
# create x,y
xx, yy = numpy.meshgrid(numpy.arange(-2.0, 2.0, 0.05),\
numpy.arange(-2.0, 2.0, 0.05))
# calculate corresponding z
z = (-self.w[0] * xx - self.w[1] * yy - self.b) * 1. / self.w[2]
self.surf = ax.plot_surface(xx, yy, z, alpha=0.2)
def visualize(self):
""" Show the training samples, the support vectors if possible and the
current decision function
"""
raise NotImplementedError("The node %s does not implement a"+ \
"visualization." % self.__class__.__name__)
# ------------------------------------------------------------------------------
class TimeoutException(Exception):
""" Break up for to long simplex iterations """
pass
| gpl-3.0 |
Odingod/mne-python | mne/viz/raw.py | 2 | 30957 | """Functions to plot raw M/EEG data
"""
from __future__ import print_function
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import copy
from functools import partial
import numpy as np
from ..externals.six import string_types
from ..io.pick import pick_types
from ..io.proj import setup_proj
from ..utils import set_config, get_config, verbose
from ..time_frequency import compute_raw_psd
from .utils import figure_nobar, _toggle_options, _toggle_proj, tight_layout
from ..defaults import _handle_default
def _plot_update_raw_proj(params, bools):
"""Helper only needs to be called when proj is changed"""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
verbose=False)
_update_raw_data(params)
params['plot_fun']()
def _update_raw_data(params):
"""Helper only needs to be called when time or proj is changed"""
from scipy.signal import filtfilt
start = params['t_start']
stop = params['raw'].time_as_index(start + params['duration'])[0]
start = params['raw'].time_as_index(start)[0]
data_picks = pick_types(params['raw'].info, meg=True, eeg=True)
data, times = params['raw'][:, start:stop]
if params['projector'] is not None:
data = np.dot(params['projector'], data)
# remove DC
if params['remove_dc'] is True:
data -= np.mean(data, axis=1)[:, np.newaxis]
if params['ba'] is not None:
data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],
data[data_picks], axis=1, padlen=0)
# scale
for di in range(data.shape[0]):
data[di] /= params['scalings'][params['types'][di]]
# stim channels should be hard limited
if params['types'][di] == 'stim':
data[di] = np.minimum(data[di], 1.0)
# clip
if params['clipping'] == 'transparent':
data[np.logical_or(data > 1, data < -1)] = np.nan
elif params['clipping'] == 'clamp':
data = np.clip(data, -1, 1, data)
params['data'] = data
params['times'] = times
def _layout_raw(params):
"""Set raw figure layout"""
s = params['fig'].get_size_inches()
scroll_width = 0.33
hscroll_dist = 0.33
vscroll_dist = 0.1
l_border = 1.2
r_border = 0.1
t_border = 0.33
b_border = 0.5
# only bother trying to reset layout if it's reasonable to do so
if s[0] < 2 * scroll_width or s[1] < 2 * scroll_width + hscroll_dist:
return
# convert to relative units
scroll_width_x = scroll_width / s[0]
scroll_width_y = scroll_width / s[1]
vscroll_dist /= s[0]
hscroll_dist /= s[1]
l_border /= s[0]
r_border /= s[0]
t_border /= s[1]
b_border /= s[1]
# main axis (traces)
ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
ax_y = hscroll_dist + scroll_width_y + b_border
ax_height = 1.0 - ax_y - t_border
params['ax'].set_position([l_border, ax_y, ax_width, ax_height])
# vscroll (channels)
pos = [ax_width + l_border + vscroll_dist, ax_y,
scroll_width_x, ax_height]
params['ax_vscroll'].set_position(pos)
# hscroll (time)
pos = [l_border, b_border, ax_width, scroll_width_y]
params['ax_hscroll'].set_position(pos)
# options button
pos = [l_border + ax_width + vscroll_dist, b_border,
scroll_width_x, scroll_width_y]
params['ax_button'].set_position(pos)
params['fig'].canvas.draw()
def _helper_resize(event, params):
"""Helper for resizing"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_raw(params)
def _pick_bad_channels(event, params):
"""Helper for selecting / dropping bad channels onpick"""
bads = params['raw'].info['bads']
    # Trade-off: avoid selecting more than one channel when drifts are present;
    # for clean data, click on flat segments rather than on peaks.
def f(x, y):
return y(np.mean(x), x.std() * 2)
for l in event.inaxes.lines:
ydata = l.get_ydata()
if not isinstance(ydata, list) and not np.isnan(ydata).any():
ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
if ymin <= event.ydata <= ymax:
this_chan = vars(l)['ch_name']
if this_chan in params['raw'].ch_names:
if this_chan not in bads:
bads.append(this_chan)
l.set_color(params['bad_color'])
l.set_zorder(-1)
else:
bads.pop(bads.index(this_chan))
l.set_color(vars(l)['def_color'])
l.set_zorder(0)
break
else:
x = np.array([event.xdata] * 2)
params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
params['vertline_t'].set_text('%0.3f' % x[0])
# update deep-copied info to persistently draw bads
params['info']['bads'] = bads
_plot_update_raw_proj(params, None)
def _mouse_click(event, params):
"""Vertical select callback"""
if event.inaxes is None or event.button != 1:
return
# vertical scrollbar changed
if event.inaxes == params['ax_vscroll']:
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scrollbar changed
elif event.inaxes == params['ax_hscroll']:
_plot_raw_time(event.xdata - params['duration'] / 2, params)
elif event.inaxes == params['ax']:
_pick_bad_channels(event, params)
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
_update_raw_data(params)
params['plot_fun']()
def _plot_raw_onkey(event, params):
"""Interpret key presses"""
import matplotlib.pyplot as plt
# check for initial plot
if event is None:
params['plot_fun']()
return
# quit event
if event.key == 'escape':
plt.close(params['fig'])
return
# change plotting params
ch_changed = False
if event.key == 'down':
params['ch_start'] += params['n_channels']
ch_changed = True
elif event.key == 'up':
params['ch_start'] -= params['n_channels']
ch_changed = True
elif event.key == 'right':
_plot_raw_time(params['t_start'] + params['duration'], params)
return
elif event.key == 'left':
_plot_raw_time(params['t_start'] - params['duration'], params)
return
elif event.key in ['o', 'p']:
_toggle_options(None, params)
return
# deal with plotting changes
if ch_changed:
_channels_changed(params)
def _channels_changed(params):
if params['ch_start'] >= len(params['info']['ch_names']):
params['ch_start'] = 0
elif params['ch_start'] < 0:
# wrap to end
rem = len(params['info']['ch_names']) % params['n_channels']
params['ch_start'] = len(params['info']['ch_names'])
params['ch_start'] -= rem if rem != 0 else params['n_channels']
params['plot_fun']()
def _plot_raw_onscroll(event, params):
"""Interpret scroll events"""
orig_start = params['ch_start']
if event.step < 0:
params['ch_start'] = min(params['ch_start'] + params['n_channels'],
len(params['info']['ch_names']) -
params['n_channels'])
    else:  # event.step >= 0, i.e. scroll up
params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
if orig_start != params['ch_start']:
_channels_changed(params)
def _plot_traces(params, inds, color, bad_color, lines, event_lines,
event_color, offsets):
"""Helper for plotting raw"""
info = params['info']
n_channels = params['n_channels']
params['bad_color'] = bad_color
# do the plotting
tick_list = []
for ii in range(n_channels):
ch_ind = ii + params['ch_start']
# let's be generous here and allow users to pass
# n_channels per view >= the number of traces available
if ii >= len(lines):
break
elif ch_ind < len(info['ch_names']):
# scale to fit
ch_name = info['ch_names'][inds[ch_ind]]
tick_list += [ch_name]
offset = offsets[ii]
# do NOT operate in-place lest this get screwed up
this_data = params['data'][inds[ch_ind]]
this_color = bad_color if ch_name in info['bads'] else color
this_z = -1 if ch_name in info['bads'] else 0
if isinstance(this_color, dict):
this_color = this_color[params['types'][inds[ch_ind]]]
            # subtraction here gets correct orientation for flipped ylim
lines[ii].set_ydata(offset - this_data)
lines[ii].set_xdata(params['times'])
lines[ii].set_color(this_color)
lines[ii].set_zorder(this_z)
vars(lines[ii])['ch_name'] = ch_name
vars(lines[ii])['def_color'] = color[params['types'][inds[ch_ind]]]
else:
# "remove" lines
lines[ii].set_xdata([])
lines[ii].set_ydata([])
# deal with event lines
if params['event_times'] is not None:
# find events in the time window
event_times = params['event_times']
mask = np.logical_and(event_times >= params['times'][0],
event_times <= params['times'][-1])
event_times = event_times[mask]
event_nums = params['event_nums'][mask]
# plot them with appropriate colors
# go through the list backward so we end with -1, the catchall
used = np.zeros(len(event_times), bool)
for ev_num, line in zip(sorted(event_color.keys())[::-1],
event_lines[::-1]):
mask = (event_nums == ev_num) if ev_num >= 0 else ~used
assert not np.any(used[mask])
used[mask] = True
t = event_times[mask]
if len(t) > 0:
xs = list()
ys = list()
for tt in t:
xs += [tt, tt, np.nan]
ys += [0, 2 * n_channels + 1, np.nan]
line.set_xdata(xs)
line.set_ydata(ys)
else:
line.set_xdata([])
line.set_ydata([])
# finalize plot
params['ax'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
params['ax'].set_yticklabels(tick_list)
params['vsel_patch'].set_y(params['ch_start'])
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order='type',
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None):
"""Plot raw data
Parameters
----------
raw : instance of Raw
The raw data to plot.
events : array | None
Events to show with vertical bars.
duration : float
Time window (sec) to plot in a given time.
start : float
Initial time to show (can be changed dynamically once plotted).
n_channels : int
Number of channels to plot at once.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
event_color : color object | dict
Color to use for events. Can also be a dict with
``{event_number: color}`` pairings. Use ``event_number==-1`` for
any event numbers in the events list that are not in the dictionary.
scalings : dict | None
Scale factors for the traces. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4)
remove_dc : bool
If True remove DC component when plotting data.
order : 'type' | 'original' | array
Order in which to plot data. 'type' groups by channel type,
'original' plots in the order of ch_names, array gives the
indices to use in plotting.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
        The title of the window. If None, either the filename of the
        raw object or '<unknown>' will be displayed as title.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly by clicking on a line.
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. Note that for efficiency and simplicity,
filtering during plotting uses forward-backward IIR filtering,
so the effective filter order will be twice ``filtorder``.
Filtering the lines for display may also produce some edge
artifacts (at the left and right edges) of the signals
during display. Filtering requires scipy >= 0.10.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
To mark or un-mark a channel as bad, click on the rather flat segments
of a channel's time series. The changes will be reflected immediately
in the raw object's ``raw.info['bads']`` entry.
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.signal import butter
color = _handle_default('color', color)
scalings = _handle_default('scalings_plot_raw', scalings)
if clipping is not None and clipping not in ('clamp', 'transparent'):
raise ValueError('clipping must be None, "clamp", or "transparent", '
'not %s' % clipping)
# figure out the IIR filtering parameters
nyq = raw.info['sfreq'] / 2.
if highpass is None and lowpass is None:
ba = None
else:
filtorder = int(filtorder)
if filtorder <= 0:
raise ValueError('filtorder (%s) must be >= 1' % filtorder)
if highpass is not None and highpass <= 0:
raise ValueError('highpass must be > 0, not %s' % highpass)
if lowpass is not None and lowpass >= nyq:
raise ValueError('lowpass must be < nyquist (%s), not %s'
% (nyq, lowpass))
if highpass is None:
ba = butter(filtorder, lowpass / nyq, 'lowpass', analog=False)
elif lowpass is None:
ba = butter(filtorder, highpass / nyq, 'highpass', analog=False)
else:
if lowpass <= highpass:
raise ValueError('lowpass (%s) must be > highpass (%s)'
% (lowpass, highpass))
ba = butter(filtorder, [highpass / nyq, lowpass / nyq], 'bandpass',
analog=False)
# make a copy of info, remove projection (for now)
info = copy.deepcopy(raw.info)
projs = info['projs']
info['projs'] = []
n_times = raw.n_times
# allow for raw objects without filename, e.g., ICA
if title is None:
title = raw._filenames
if len(title) == 0: # empty list or absent key
title = '<unknown>'
elif len(title) == 1:
title = title[0]
else: # if len(title) > 1:
title = '%s ... (+ %d more) ' % (title[0], len(title) - 1)
if len(title) > 60:
title = '...' + title[-60:]
elif not isinstance(title, string_types):
raise TypeError('title must be None or a string')
if events is not None:
event_times = events[:, 0].astype(float) - raw.first_samp
event_times /= info['sfreq']
event_nums = events[:, 2]
else:
event_times = event_nums = None
# reorganize the data in plotting order
inds = list()
types = list()
for t in ['grad', 'mag']:
inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])]
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
for t in ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp',
'misc', 'chpi', 'syst', 'ias', 'exci']:
pick_kwargs[t] = True
inds += [pick_types(raw.info, **pick_kwargs)]
types += [t] * len(inds[-1])
pick_kwargs[t] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(info['ch_names']):
raise RuntimeError('Some channels not classified, please report '
'this problem')
    # put them back to original or modified order for natural plotting
reord = np.argsort(inds)
types = [types[ri] for ri in reord]
if isinstance(order, str):
if order == 'original':
inds = inds[reord]
elif order != 'type':
raise ValueError('Unknown order type %s' % order)
elif isinstance(order, np.ndarray):
if not np.array_equal(np.sort(order),
np.arange(len(info['ch_names']))):
raise ValueError('order, if array, must have integers from '
'0 to n_channels - 1')
# put back to original order first, then use new order
inds = inds[reord][order]
if not isinstance(event_color, dict):
event_color = {-1: event_color}
else:
event_color = copy.deepcopy(event_color) # we might modify it
for key in event_color:
if not isinstance(key, int):
raise TypeError('event_color key "%s" was a %s not an int'
% (key, type(key)))
if key <= 0 and key != -1:
raise KeyError('only key <= 0 allowed is -1 (cannot use %s)'
% key)
# set up projection and data parameters
params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
info=info, projs=projs, remove_dc=remove_dc, ba=ba,
n_channels=n_channels, scalings=scalings, types=types,
n_times=n_times, event_times=event_times,
event_nums=event_nums, clipping=clipping, fig_proj=None)
# set up plotting
size = get_config('MNE_BROWSE_RAW_SIZE')
if size is not None:
size = size.split(',')
size = tuple([float(s) for s in size])
# have to try/catch when there's no toolbar
fig = figure_nobar(facecolor=bgcolor, figsize=size)
fig.canvas.set_window_title('mne_browse_raw')
ax = plt.subplot2grid((10, 10), (0, 0), colspan=9, rowspan=9)
ax.set_title(title, fontsize=12)
ax_hscroll = plt.subplot2grid((10, 10), (9, 0), colspan=9)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Time (s)')
ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
ax_vscroll.set_axis_off()
ax_button = plt.subplot2grid((10, 10), (9, 9))
# store these so they can be fixed on resize
params['fig'] = fig
params['ax'] = ax
params['ax_hscroll'] = ax_hscroll
params['ax_vscroll'] = ax_vscroll
params['ax_button'] = ax_button
# populate vertical and horizontal scrollbars
for ci in range(len(info['ch_names'])):
this_color = (bad_color if info['ch_names'][inds[ci]] in info['bads']
else color)
if isinstance(this_color, dict):
this_color = this_color[types[inds[ci]]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
facecolor='w', edgecolor='w')
ax_vscroll.add_patch(vsel_patch)
params['vsel_patch'] = vsel_patch
hsel_patch = mpl.patches.Rectangle((start, 0), duration, 1, edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
params['hsel_patch'] = hsel_patch
ax_hscroll.set_xlim(0, n_times / float(info['sfreq']))
n_ch = len(info['ch_names'])
ax_vscroll.set_ylim(n_ch, 0)
ax_vscroll.set_title('Ch.')
# make shells for plotting traces
offsets = np.arange(n_channels) * 2 + 1
ylim = [n_channels * 2 + 1, 0]
ax.set_yticks(offsets)
ax.set_ylim(ylim)
# plot event_line first so it's in the back
event_lines = [ax.plot([np.nan], color=event_color[ev_num])[0]
for ev_num in sorted(event_color.keys())]
lines = [ax.plot([np.nan], antialiased=False, linewidth=0.5)[0]
for _ in range(n_ch)]
ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
vertline_color = (0., 0.75, 0.)
params['ax_vertline'] = ax.plot([0, 0], ylim, color=vertline_color,
zorder=-1)[0]
params['ax_vertline'].ch_name = ''
params['vertline_t'] = ax_hscroll.text(0, 0.5, '0.000',
color=vertline_color,
verticalalignment='center',
horizontalalignment='right')
params['ax_hscroll_vertline'] = ax_hscroll.plot([0, 0], [0, 1],
color=vertline_color,
zorder=1)[0]
params['plot_fun'] = partial(_plot_traces, params=params, inds=inds,
color=color, bad_color=bad_color, lines=lines,
event_lines=event_lines,
event_color=event_color, offsets=offsets)
# set up callbacks
opt_button = None
if len(raw.info['projs']) > 0:
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
callback_key = partial(_plot_raw_onkey, params=params)
fig.canvas.mpl_connect('key_press_event', callback_key)
callback_scroll = partial(_plot_raw_onscroll, params=params)
fig.canvas.mpl_connect('scroll_event', callback_scroll)
callback_pick = partial(_mouse_click, params=params)
fig.canvas.mpl_connect('button_press_event', callback_pick)
callback_resize = partial(_helper_resize, params=params)
fig.canvas.mpl_connect('resize_event', callback_resize)
# As this code is shared with plot_evoked, a few extra steps are needed:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_raw_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
params['callback_key'] = callback_key
# have to store this, or it could get garbage-collected
params['opt_button'] = opt_button
# do initial plots
callback_proj('none')
_layout_raw(params)
# deal with projectors
if show_options is True:
_toggle_options(None, params)
if show:
try:
plt.show(block=block)
except TypeError: # not all versions have this
plt.show()
return fig
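# --- Illustrative sketch (not part of the original module) of the callback-wiring
# pattern used in plot_raw above: handlers close over a shared `params` dict via
# functools.partial and are registered on the figure canvas with mpl_connect.
# The helper below is hypothetical and is never called by this module.
def _demo_callback_wiring():
    """Minimal, self-contained demo of the partial + mpl_connect pattern."""
    from functools import partial
    import matplotlib.pyplot as plt

    def on_key(event, params):
        # record the last key press in the shared state dict
        params['last_key'] = event.key

    demo_params = dict(last_key=None)
    fig = plt.figure()
    fig.canvas.mpl_connect('key_press_event', partial(on_key, params=demo_params))
    return fig, demo_params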
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Aux function"""
import matplotlib.pyplot as plt
if area_mode not in [None, 'std', 'range']:
raise ValueError('"area_mode" must be "std", "range", or None')
if picks is None:
if ax is not None:
raise ValueError('If "ax" is not supplied (None), then "picks" '
'must also be supplied')
megs = ['mag', 'grad', False]
eegs = [False, False, True]
names = ['Magnetometers', 'Gradiometers', 'EEG']
picks_list = list()
titles_list = list()
for meg, eeg, name in zip(megs, eegs, names):
picks = pick_types(info, meg=meg, eeg=eeg, ref_meg=False)
if len(picks) > 0:
picks_list.append(picks)
titles_list.append(name)
if len(picks_list) == 0:
raise RuntimeError('No MEG or EEG channels found')
else:
picks_list = [picks]
titles_list = ['Selected channels']
ax_list = [ax]
make_label = False
fig = None
if ax is None:
fig = plt.figure()
ax_list = list()
for ii in range(len(picks_list)):
# Make x-axes change together
if ii > 0:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
sharex=ax_list[0]))
else:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
make_label = True
else:
fig = ax_list[0].get_figure()
return fig, picks_list, titles_list, ax_list, make_label
@verbose
def plot_raw_psd(raw, tmin=0., tmax=np.inf, fmin=0, fmax=np.inf, proj=False,
n_fft=2048, picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33,
n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
"""Plot the power spectral density across channels
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If both
`picks` and `ax` are None, separate subplots will be created for
each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
If True, transform data to decibels.
show : bool
Show figure if True.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
"""
import matplotlib.pyplot as plt
fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
raw.info, proj, picks, ax, area_mode)
for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
ax_list)):
psds, freqs = compute_raw_psd(raw, tmin=tmin, tmax=tmax, picks=picks,
fmin=fmin, fmax=fmax, proj=proj,
n_fft=n_fft, n_overlap=n_overlap,
n_jobs=n_jobs, verbose=None)
# Convert PSDs to dB
if dB:
psds = 10 * np.log10(psds)
unit = 'dB'
else:
unit = 'power'
psd_mean = np.mean(psds, axis=0)
if area_mode == 'std':
psd_std = np.std(psds, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Freq (Hz)')
if ii == len(picks_list) // 2:
ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
ax.set_title(title)
ax.set_xlim(freqs[0], freqs[-1])
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
if show is True:
plt.show()
return fig
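# --- Usage sketch (illustrative only, not canonical MNE code) ------------------
# How plot_raw_psd might typically be called.  'sample_raw.fif' is a hypothetical
# file name and the exact reader call depends on the installed MNE version, so
# treat the loading line as an assumption rather than documented API.
if __name__ == '__main__':
    import mne
    raw = mne.io.Raw('sample_raw.fif', preload=True)  # hypothetical path/reader
    # PSDs between 1 and 60 Hz, shading the +/- 1 SD band across channels
    plot_raw_psd(raw, fmin=1., fmax=60., n_fft=2048,
                 area_mode='std', dB=True, show=True)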
| bsd-3-clause |
XENON1T/pax | pax/PatternFitter.py | 1 | 18942 | from __future__ import division
from collections import namedtuple
import json
import gzip
import re
import logging
import numpy as np
import numexpr as ne
import matplotlib.pyplot as plt
try:
from matplotlib import _cntr
except ImportError:
print("matplotlib._cntr did not import, confidence tuple generation in PatternFitter disabled")
_cntr = None
from scipy.optimize import fmin_powell
from scipy.ndimage.interpolation import zoom as image_zoom
from pax import utils
from pax.exceptions import CoordinateOutOfRangeException
from pax.datastructure import ConfidenceTuple
# Named tuple for coordinate data storage
# Maybe works faster than dictionary... can always remove later
CoordinateData = namedtuple('CoordinateData', ('minimum', 'maximum', 'n_points', 'point_spacing'))
class PatternFitter(object):
def __init__(self, filename, zoom_factor=1, adjust_to_qe=None, default_errors=None):
"""Initialize a pattern map file from filename.
Format of the file is very similar to InterpolatingMap; a (gzip compressed) json containing:
'coordinate_system' : [['x', (x_min, x_max, n_x)], ['y',...
'map' : [[[valuex1y1pmt1, valuex1y1pmt2, ...], ...], ...]
'name': 'Nice file with maps',
'description': 'Say what the maps are, who you are, your favorite food, etc',
'timestamp': unix epoch seconds timestamp
where x_min is the lowest x coordinate of a point, x_max the highest, n_x the number of points
zoom_factor is factor by which the spatial dimensions of the map will be upsampled.
adjust_to_qe: array of same length as the number of pmts in the map;
we'll adjust the patterns to account for these QEs, upweighing PMTs with higher QEs
Obviously this should be None if map already includes QE effects (e.g. if it is data-derived)!
default_errors: array of the same length as the number of pmts in the map;
This is the default factor which will be applied to obtain the squared systematic errors in the goodness
of fit statistic, as follows:
squared_systematic_errors = (areas_observed * default_errors)**2
"""
self.log = logging.getLogger('PatternFitter')
with gzip.open(utils.data_file_name(filename)) as infile:
json_data = json.loads(infile.read().decode())
self.data = np.array(json_data['map'])
self.log.debug('Loaded pattern file named: %s' % json_data['name'])
self.log.debug('Description:\n ' + re.sub(r'\n', r'\n ', json_data['description']))
self.log.debug('Data shape: %s' % str(self.data.shape))
self.log.debug('Will zoom in by factor %s' % zoom_factor)
self.dimensions = len(json_data['coordinate_system']) # Spatial dimensions (other one is sampling points)
# Zoom the spatial map using linear interpolation, if desired
if zoom_factor != 1:
self.data = image_zoom(self.data, zoom=[zoom_factor] * self.dimensions + [1], order=1)
# Adjust the expected patterns to the PMT's quantum efficiencies, if desired
# No need to re-normalize: will be done in each gof computation anyway
if adjust_to_qe is not None:
self.data *= adjust_to_qe[[np.newaxis] * self.dimensions]
# Store index starts and distances for quick access, assuming uniform grid spacing
self.coordinate_data = []
for dim_i, (name, (start, stop, n_points)) in enumerate(json_data['coordinate_system']):
n_points *= zoom_factor
if not n_points == self.data.shape[dim_i]:
raise ValueError("Map interpretation error: %d points expected along %s, but map is %d points long" % (
n_points, name, self.data.shape[dim_i]))
self.coordinate_data.append(CoordinateData(minimum=start,
maximum=stop,
n_points=n_points,
point_spacing=(stop - start)/(n_points - 1)))
self.log.debug('Coordinate ranges: %s' % ', '.join(['%s-%s (%d points)' % (cd.minimum, cd.maximum, cd.n_points)
for cd in self.coordinate_data]))
# TODO: Technically we should zero the points outside the tpc bounds again:
# some LCE may have leaked into this region due to upsampling... but doesn't matter:
# if it causes a bias, it will push some events who are already far outside the fiducial volume
# even further out.
self.n_points = self.data.shape[-1]
self.default_pmt_selection = np.ones(self.n_points, dtype=np.bool)
if default_errors is None:
default_errors = 0
self.default_errors = default_errors
def expected_pattern(self, coordinates):
"""Returns expected, normalized pattern at coordinates
'Pattern' means: expected fraction of light seen in each PMT, among PMTs included in the map.
Keep in mind you'll have to re-normalize if there are any dead / saturated PMTs...
"""
# Copy is to ensure the map is not modified accidentally... happened once, never again.
pattern = self.data[self.coordinates_to_indices(coordinates) + [slice(None)]].copy()
sum_pattern = pattern.sum()
if sum_pattern == 0:
raise CoordinateOutOfRangeException("Expected light pattern at coordinates %s "
"consists of only zeros!" % str(coordinates))
return pattern / sum_pattern
def compute_gof(self, coordinates, areas_observed,
pmt_selection=None, square_syst_errors=None, statistic='chi2gamma'):
"""Compute goodness of fit at a single coordinate point
:param areas_observed: arraylike of length n_points containing observed area at each point
:param coordinates: arraylike of n_dimensions, coordinates to test
:param pmt_selection: boolean array of length n_points, if False point will be excluded from statistic
:param square_syst_errors: float array of length n_points, systematic error to use for each point
:param statistic: 'chi2' or 'chi2gamma': goodness of fit statistic to use
:return: value of goodness of fit statistic, or float('inf') if coordinates outside of range
"""
return self._compute_gof_base(self.coordinates_to_indices(coordinates), areas_observed,
pmt_selection, square_syst_errors, statistic)
def compute_gof_grid(self, center_coordinates, grid_size, areas_observed,
pmt_selection=None, square_syst_errors=None, statistic='chi2gamma', plot=False):
"""Compute goodness of fit on a grid of points of length grid_size in each coordinate,
centered at center_coordinates. All other parameters like compute_gof.
Returns gof_grid, (index of lowest grid point in dimension 1, ...)
:return:
"""
index_selection = []
lowest_indices = []
for dimension_i, x in enumerate(center_coordinates):
cd = self.coordinate_data[dimension_i]
start = self._coordinate_to_index(max(x - grid_size / 2, cd.minimum),
dimension_i)
lowest_indices.append(start)
stop = self._coordinate_to_index(min(x + grid_size / 2, cd.maximum),
dimension_i)
index_selection.append(slice(start, stop + 1)) # Don't forget python's silly indexing here...
gofs = self._compute_gof_base(index_selection, areas_observed, pmt_selection, square_syst_errors, statistic)
# The below code is for diagnostic plots only
if plot:
plt.figure()
plt.set_cmap('viridis')
# Make the linspaces of coordinates along each dimension
# Remember the grid indices are bin centers, so pad each edge by half a point spacing
q = []
for dimension_i, cd in enumerate(self.coordinate_data):
dimstart = self._index_to_coordinate(index_selection[dimension_i].start, dimension_i)
dimstart -= 0.5 * cd.point_spacing
# stop -1 for python silly indexing again...
dimstop = self._index_to_coordinate(index_selection[dimension_i].stop - 1, dimension_i)
dimstop += 0.5 * cd.point_spacing
q.append(np.linspace(dimstart, dimstop, gofs.shape[dimension_i] + 1))
if dimension_i == 0:
plt.xlim((dimstart, dimstop))
else:
plt.ylim((dimstart, dimstop))
if statistic == 'likelihood_poisson':
# because ln(a/b) = ln(a) - ln(b), also different ranges
q.append(gofs.T - np.nanmin(gofs))
plt.pcolormesh(*q, vmin=1, vmax=100, alpha=0.9)
plt.colorbar(label=r'$\Delta L$')
else:
q.append(gofs.T / np.nanmin(gofs))
plt.pcolormesh(*q, vmin=1, vmax=4, alpha=0.9)
plt.colorbar(label='Goodness-of-fit / minimum')
plt.xlabel('x [cm]')
plt.ylabel('y [cm]')
return gofs, lowest_indices
def coordinates_to_indices(self, coordinates):
return [self._coordinate_to_index(x, dimension_i) for dimension_i, x in enumerate(coordinates)]
def _coordinate_to_index(self, value, dimension_i):
"""Return array index along dimension_i which contains value.
Raises CoordinateOutOfRangeException if value out of range.
TODO: check if this is faster than just using np.digitize on the index list
"""
cd = self.coordinate_data[dimension_i]
if not cd.minimum - cd.point_spacing / 2 <= value <= cd.maximum + cd.point_spacing / 2:
raise CoordinateOutOfRangeException("%s is not in allowed range %s-%s" % (value, cd.minimum, cd.maximum))
value = max(cd.minimum, min(value, cd.maximum - 0.01 * cd.point_spacing))
return int((value - cd.minimum) / cd.point_spacing + 0.5)
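    # Worked example (illustrative): with minimum=0, maximum=10 and n_points=11,
    # point_spacing is 1.0, so a coordinate of 3.4 maps to int(3.4 / 1.0 + 0.5) = 3,
    # i.e. the nearest grid point; values within half a spacing of the range edges
    # pass the check above and are clipped just inside the map.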
def _index_to_coordinate(self, index_i, dimension_i):
cd = self.coordinate_data[dimension_i]
return cd.minimum + cd.point_spacing * index_i
def _compute_gof_base(self, index_selection, areas_observed, pmt_selection, square_syst_errors, statistic):
"""Compute goodness of fit statistic: see compute_gof
index_selection will be used to slice the spatial histogram.
:return: gof with shape determined by index_selection.
"""
if pmt_selection is None:
pmt_selection = self.default_pmt_selection
if square_syst_errors is None:
square_syst_errors = (self.default_errors * areas_observed) ** 2
# The following aliases are used in the numexprs below
areas_observed = areas_observed.copy()[pmt_selection]
q = self.data[index_selection + [pmt_selection]]
qsum = q.sum(axis=-1)[..., np.newaxis] # noqa
fractions_expected = ne.evaluate("q / qsum") # noqa
total_observed = areas_observed.sum() # noqa
ao = areas_observed # noqa
square_syst_errors = square_syst_errors[pmt_selection] # noqa
# The actual goodness of fit computation is here...
# Areas expected = fractions_expected * sum(areas_observed)
if statistic == 'chi2gamma':
result = ne.evaluate("(ao + where(ao > 1, 1, ao) - {ae})**2 /"
"({ae} + square_syst_errors + 1)".format(ae='fractions_expected * total_observed'))
elif statistic == 'chi2':
result = ne.evaluate("(ao - {ae})**2 /"
"({ae} + square_syst_errors)".format(ae='fractions_expected * total_observed'))
elif statistic == 'likelihood_poisson':
# Poisson likelihood chi-square (Baker and Cousins, 1984)
# Clip areas to the range [1e-10, +inf), because of log(0)
areas_expected_clip = np.clip(fractions_expected * total_observed, 1e-10, float('inf'))
areas_observed_clip = np.clip(areas_observed, 1e-10, float('inf'))
result = ne.evaluate("-2*({ao} * log({ae}/{ao}) + {ao} - {ae})".format(ae='areas_expected_clip',
ao='areas_observed_clip'))
else:
raise ValueError('Pattern goodness of fit statistic %s not implemented!' % statistic)
return np.sum(result, axis=-1)
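    # For reference, the numexpr strings above evaluate (restated, no new math):
    #   chi2:               sum_i (a_i - e_i)**2 / (e_i + s_i**2)
    #   chi2gamma:          sum_i (a_i + min(a_i, 1) - e_i)**2 / (e_i + s_i**2 + 1)
    #   likelihood_poisson: -2 * sum_i (a_i * ln(e_i / a_i) + a_i - e_i)
    # with a_i the observed areas, e_i = fraction_expected_i * total_observed the
    # expected areas, and s_i**2 the squared systematic errors (Baker & Cousins 1984
    # for the Poisson likelihood chi-square).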
def minimize_gof_grid(self, center_coordinates, grid_size, areas_observed,
pmt_selection=None, square_syst_errors=None, statistic='chi2gamma', plot=False, cls=None):
"""Return (spatial position which minimizes goodness of fit parameter, gof at that position,
errors on that position) minimum is found by minimizing over a grid centered at
center_coordinates and extending by grid_size in all dimensions.
Errors are optionally calculated by tracing contours at given confidence levels, from the
resulting set of points the distances to the minimum are calculated for each dimension and
the mean of these distances is reported as (dx, dy).
All other parameters like compute_gof
"""
gofs, lowest_indices = self.compute_gof_grid(center_coordinates, grid_size, areas_observed,
pmt_selection, square_syst_errors, statistic, plot)
min_index = np.unravel_index(np.nanargmin(gofs), gofs.shape)
# Convert index back to position
result = []
for dimension_i, i_of_minimum in enumerate(min_index):
x = self._index_to_coordinate(lowest_indices[dimension_i] + i_of_minimum, dimension_i)
result.append(x)
# Compute confidence level contours (but only in 2D)
n_dim = len(min_index)
# Store contours for plotting only
cl_segments = []
# Store (dx, dy) for each CL for output
confidence_tuples = []
if cls is not None and n_dim == 2 and _cntr is not None:
x, y = np.mgrid[:gofs.shape[0], :gofs.shape[1]]
# Use matplotlib _Cntr module to trace contours (without plotting)
c = _cntr.Cntr(x, y, gofs)
for cl in cls:
ct = ConfidenceTuple()
ct.level = cl
# Trace at the required value
cl_trace = c.trace(gofs[min_index] + cl)
# Check for failure
if len(cl_trace) == 0:
confidence_tuples.append(ct)
continue
# Get the actual contour, the first half of cl_trace is an array of (x, y) pairs
half_length = int(len(cl_trace)//2)
cl_segment = np.array(cl_trace[:half_length][0])
# Extract the x values and y values separately, also convert to the TPC coordinate system
x_values = np.array([self._index_to_coordinate(lowest_indices[0] + x, 0) for x in cl_segment[:,0]])
y_values = np.array([self._index_to_coordinate(lowest_indices[1] + y, 1) for y in cl_segment[:,1]])
if np.all(np.isnan(x_values)) or np.all(np.isnan(y_values)):
self.log.debug("Cannot compute confidence contour: all x or y values are Nan!")
# If we'd now call nanmin, we get an annoying numpy runtime warning.
else:
# Calculate the confidence tuple for this CL
ct.x0 = np.nanmin(x_values)
ct.y0 = np.nanmin(y_values)
ct.dx = abs(np.nanmax(x_values) - np.nanmin(x_values))
ct.dy = abs(np.nanmax(y_values) - np.nanmin(y_values))
# Does the contour touch the edge of the TPC
if np.isnan(x_values).any() or np.isnan(y_values).any():
ct.at_edge = True
confidence_tuples.append(ct)
# The contour points, only for plotting
if plot:
contour_points = np.array([x_values, y_values]).T
# Take out point if x or y is nan
contour_points = [p for p in contour_points if not np.isnan(p).any()]
cl_segments.append(contour_points)
if plot and n_dim == 2:
plt.scatter(*[[r] for r in result], marker='*', s=20, color='orange', label='Grid minimum')
for i, contour in enumerate(cl_segments):
if len(contour) == 0:
continue
color = lambda x: 'w' if x % 2 == 0 else 'r'
p = plt.Polygon(contour, fill=False, color=color(i), label=str(cls[i]))
plt.gca().add_artist(p)
# plt.savefig("plot_%.2f_%.2f.pdf" % (result[0], result[1]), dpi=150)
return result, gofs[min_index], confidence_tuples
def minimize_gof_powell(self, start_coordinates, areas_observed,
pmt_selection=None, square_syst_errors=None, statistic='chi2gamma'):
direc = None
if self.dimensions == 2:
# Hack to match old chi2gamma results
s = lambda d: 1 if d < 0 else -1 # flake8: noqa
direc = np.array([[s(start_coordinates[0]), 0],
[0, s(start_coordinates[1])]])
def safe_compute_gof(*args, **kwargs):
try:
return self.compute_gof(*args, **kwargs)
except CoordinateOutOfRangeException:
return float('inf')
# Minimize chi_square_gamma function, fmin_powell is the call to the SciPy minimizer
# It takes the function to minimize, starting position and several options
# It returns the optimal values for the position (xopt) and function value (fopt)
# A warnflag tells if the maximum number of iterations was exceeded
# warnflag 0, OK
# warnflag 1, maximum functions evaluations exceeded
# warnflag 2, maximum iterations exceeded
rv = fmin_powell(safe_compute_gof,
start_coordinates, direc=direc,
args=(areas_observed, pmt_selection, square_syst_errors, statistic),
xtol=0.0001, ftol=0.0001,
maxiter=10, maxfun=None,
full_output=1, disp=0, retall=0)
xopt, fopt, direc, iter, funcalls, warnflag = rv
# On failure the minimizer seems to give np.array([float('inf')])
if isinstance(fopt, np.ndarray):
fopt = float('nan')
return xopt, fopt
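# --- Usage sketch (illustrative, not part of pax) -------------------------------
# Builds a toy 2x2 map with 3 "PMTs" in the gzipped-json format documented in
# PatternFitter.__init__ and tries to load it.  Passing an absolute path assumes
# pax.utils.data_file_name hands existing paths through unchanged, which is an
# assumption here, hence the try/except guard.
if __name__ == '__main__':
    toy_map = {
        'name': 'toy map',
        'description': 'minimal illustrative pattern map',
        'timestamp': 0,
        'coordinate_system': [['x', [0.0, 1.0, 2]], ['y', [0.0, 1.0, 2]]],
        'map': [[[1.0, 2.0, 3.0], [2.0, 2.0, 2.0]],
                [[3.0, 2.0, 1.0], [1.0, 1.0, 4.0]]],
    }
    toy_path = '/tmp/toy_pattern_map.json.gz'  # hypothetical location
    with gzip.open(toy_path, 'wb') as f:
        f.write(json.dumps(toy_map).encode())
    try:
        pf = PatternFitter(toy_path)
        print(pf.expected_pattern((0.0, 0.0)))                        # fractions summing to 1
        print(pf.compute_gof((0.0, 0.0), np.array([1.0, 2.0, 3.0])))  # chi2gamma at that point
    except Exception as exc:
        print('Could not load the toy map through pax utils: %s' % exc)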
| bsd-3-clause |
haiweiosu/Optical-Character-Recognition-using-Template-Matching-Object-Detection-in-Images | task2_4.py | 1 | 1734 | # USAGE
# python sliding_window.py --image images/adrian_florida.jpg
# import the necessary packages
from imagesearch.helpers import pyramid
from imagesearch.helpers import sliding_window
from task2_2_step_3 import lin_svc
from config import negative_training_1, negative_training_2, negative_training_3, negative_training_4
from config import road9, road10
from skimage.feature import hog
from PIL import Image
from skimage import color, exposure
from sklearn.svm.libsvm import decision_function
from sklearn import svm
import argparse
import time
import cv2
import numpy as np
# load the image and define the window width and height
image = cv2.imread(road9)
(winW, winH) = (225, 225)
false_positives = []
# loop over the image pyramid
for resized in pyramid(image, scale=1.5):
# loop over the sliding window for each layer of the pyramid
for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
# if the window does not meet our desired window size, ignore it
if window.shape[0] != winH or window.shape[1] != winW:
# print window.shape[0], window.shape[1]
continue
# THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A
# MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE
# WINDOW
hog = cv2.HOGDescriptor()
h = hog.compute(resized)
print(h.flatten())
prediction = lin_svc.predict(h.reshape(1, -1))
print(prediction)
threshold = 0.2
loc = np.where( h >= threshold)
# # since we do not have a classifier, we'll just draw the window
clone = resized.copy()
for pt in zip(*loc[::-1]):
cv2.rectangle(clone, pt, (pt[0] + 50, pt[1] + 50), (0, 255, 0), 2)
cv2.imshow("Window", clone)
cv2.waitKey(1)
time.sleep(10) | apache-2.0 |
ndingwall/scikit-learn | sklearn/datasets/tests/test_20news.py | 10 | 5098 | """Test the 20news downloader, if the data is available,
or if specifically requested via environment variable
(e.g. for travis cron job)."""
from functools import partial
from unittest.mock import patch
import pytest
import numpy as np
import scipy.sparse as sp
from sklearn.datasets.tests.test_common import check_as_frame
from sklearn.datasets.tests.test_common import check_pandas_dependency_message
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.preprocessing import normalize
def test_20news(fetch_20newsgroups_fxt):
data = fetch_20newsgroups_fxt(subset='all', shuffle=False)
# Extract a reduced dataset
data2cats = fetch_20newsgroups_fxt(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert data2cats.target_names == data.target_names[-2:]
# Assert that we have only 0 and 1 as labels
assert np.unique(data2cats.target).tolist() == [0, 1]
# Check that the number of filenames is consistent with data/target
assert len(data2cats.filenames) == len(data2cats.target)
assert len(data2cats.filenames) == len(data2cats.data)
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert entry1 == entry2
# check that return_X_y option
X, y = fetch_20newsgroups_fxt(subset='all', shuffle=False, return_X_y=True)
assert len(X) == len(data.data)
assert y.shape == data.target.shape
def test_20news_length_consistency(fetch_20newsgroups_fxt):
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
# Extract the full dataset
data = fetch_20newsgroups_fxt(subset='all')
assert len(data['data']) == len(data.data)
assert len(data['target']) == len(data.target)
assert len(data['filenames']) == len(data.filenames)
def test_20news_vectorized(fetch_20newsgroups_vectorized_fxt):
# test subset = train
bunch = fetch_20newsgroups_vectorized_fxt(subset="train")
assert sp.isspmatrix_csr(bunch.data)
assert bunch.data.shape == (11314, 130107)
assert bunch.target.shape[0] == 11314
assert bunch.data.dtype == np.float64
# test subset = test
bunch = fetch_20newsgroups_vectorized_fxt(subset="test")
assert sp.isspmatrix_csr(bunch.data)
assert bunch.data.shape == (7532, 130107)
assert bunch.target.shape[0] == 7532
assert bunch.data.dtype == np.float64
# test return_X_y option
fetch_func = partial(fetch_20newsgroups_vectorized_fxt, subset='test')
check_return_X_y(bunch, fetch_func)
# test subset = all
bunch = fetch_20newsgroups_vectorized_fxt(subset='all')
assert sp.isspmatrix_csr(bunch.data)
assert bunch.data.shape == (11314 + 7532, 130107)
assert bunch.target.shape[0] == 11314 + 7532
assert bunch.data.dtype == np.float64
def test_20news_normalization(fetch_20newsgroups_vectorized_fxt):
X = fetch_20newsgroups_vectorized_fxt(normalize=False)
X_ = fetch_20newsgroups_vectorized_fxt(normalize=True)
X_norm = X_['data'][:100]
X = X['data'][:100]
assert_allclose_dense_sparse(X_norm, normalize(X))
assert np.allclose(np.linalg.norm(X_norm.todense(), axis=1), 1)
def test_20news_as_frame(fetch_20newsgroups_vectorized_fxt):
pd = pytest.importorskip('pandas')
bunch = fetch_20newsgroups_vectorized_fxt(as_frame=True)
check_as_frame(bunch, fetch_20newsgroups_vectorized_fxt)
frame = bunch.frame
assert frame.shape == (11314, 130108)
assert all([isinstance(col, pd.SparseDtype) for col in bunch.data.dtypes])
# Check a small subset of features
for expected_feature in [
"beginner",
"beginners",
"beginning",
"beginnings",
"begins",
"begley",
"begone",
]:
assert expected_feature in frame.keys()
assert "category_class" in frame.keys()
assert bunch.target.name == "category_class"
def test_as_frame_no_pandas(
fetch_20newsgroups_vectorized_fxt, hide_available_pandas
):
check_pandas_dependency_message(fetch_20newsgroups_vectorized_fxt)
def test_outdated_pickle(fetch_20newsgroups_vectorized_fxt):
with patch("os.path.exists") as mock_is_exist:
with patch("joblib.load") as mock_load:
# mock that the dataset was cached
mock_is_exist.return_value = True
# mock that we have an outdated pickle with only X and y returned
mock_load.return_value = ("X", "y")
err_msg = "The cached dataset located in"
with pytest.raises(ValueError, match=err_msg):
fetch_20newsgroups_vectorized_fxt(as_frame=True)
| bsd-3-clause |
av8ramit/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 48 | 11896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
urllib.urlretrieve(train_data_url, train_file_path)
train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
urllib.urlretrieve(test_data_url, test_file_path)
test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
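# Illustrative note (not part of the original example): for a categorical column
# with three rows, e.g. df['gender'] == ['female', 'male', 'male'], _input_fn above
# builds
#   tf.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
#                   values=['female', 'male', 'male'],
#                   dense_shape=[3, 1])
# i.e. one sparse entry per row of a single-column sparse matrix, which is the
# layout the sparse/embedding feature columns defined above expect.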
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
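# Illustrative note (not from the original example): with --num_parameter_servers=2
# and --worker_index=0, the TF_CONFIG environment variable set above becomes,
# up to key ordering,
#   {"cluster": {"ps": ["fake_ps", "fake_ps"]}, "task": {"index": 0}}
# since tf.contrib.learn.TaskType.PS is the string "ps"; RunConfig reads this to
# infer the (here fake) cluster layout.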
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the census data")
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
plissonf/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
nhejazi/scikit-learn | examples/applications/plot_stock_market.py | 5 | 9800 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
from sklearn import cluster, covariance, manifold
# #############################################################################
# Retrieve the data from Internet
def retry(f, n_attempts=3):
"Wrapper function to retry function calls in case of exceptions"
def wrapper(*args, **kwargs):
for i in range(n_attempts):
try:
return f(*args, **kwargs)
except Exception as e:
if i == n_attempts - 1:
raise
return wrapper
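# Illustrative sketch (not part of the original example): `retry` simply re-invokes
# the wrapped callable up to n_attempts times and re-raises the last exception.
# `_demo_flaky_download` is a hypothetical stand-in for a transient network call
# and is never invoked by this script.
def _demo_flaky_download(symbol):
    raise IOError("temporary failure fetching %s" % symbol)

_demo_download_with_retries = retry(_demo_flaky_download, n_attempts=5)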
def quotes_historical_google(symbol, date1, date2):
"""Get the historical data from Google finance.
Parameters
----------
symbol : str
Ticker symbol to query for, for example ``"DELL"``.
date1 : datetime.datetime
Start date.
date2 : datetime.datetime
End date.
Returns
-------
X : array
The columns are ``date`` -- datetime, ``open``, ``high``,
``low``, ``close`` and ``volume`` of type float.
"""
params = urlencode({
'q': symbol,
'startdate': date1.strftime('%b %d, %Y'),
'enddate': date2.strftime('%b %d, %Y'),
'output': 'csv'
})
url = 'http://www.google.com/finance/historical?' + params
response = urlopen(url)
dtype = {
'names': ['date', 'open', 'high', 'low', 'close', 'volume'],
'formats': ['object', 'f4', 'f4', 'f4', 'f4', 'f4']
}
converters = {0: lambda s: datetime.strptime(s.decode(), '%d-%b-%y')}
return np.genfromtxt(response, delimiter=',', skip_header=1,
dtype=dtype, converters=converters,
missing_values='-', filling_values=-1)
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime(2003, 1, 1)
d2 = datetime(2008, 1, 1)
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'McDonald\'s',
'PEP': 'Pepsi',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas Instruments',
'XRX': 'Xerox',
'WMT': 'Wal-Mart',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
# retry is used because quotes_historical_google can temporarily fail
# for various reasons (e.g. empty result from Google API).
quotes = [
retry(quotes_historical_google)(symbol, d1, d2) for symbol in symbols
]
close_prices = np.vstack([q['close'] for q in quotes])
open_prices = np.vstack([q['open'] for q in quotes])
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
# #############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
# #############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
# #############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
# #############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
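# Note (added for clarity; a standard identity, not project-specific): the two
# rescalings above compute Theta_ij / sqrt(Theta_ii * Theta_jj) from the estimated
# precision matrix Theta, which up to a sign flip is the partial correlation
# between quotes i and j given all the others; only absolute values are used when
# selecting and drawing the edges, so the sign does not matter here.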
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
Ninjakow/TrueSkill | lib/numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
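# Illustrative example for zip_descr (exact dtype strings are platform dependent):
#   a = np.zeros(3, dtype=[('A', int), ('B', float)])
#   b = np.zeros(3, dtype=float)
#   zip_descr((a, b), flatten=True)
# yields a descr listing the fields 'A' and 'B' followed by one float field taken
# from `b`; the empty field name is auto-filled (as 'f2') when np.dtype builds the
# combined description.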
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to collapse nested fields into a flat sequence of items.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
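# Illustrative usage (hypothetical data; the integer width shown in the
# dtype is platform dependent):
#   >>> from numpy.lib import recfunctions as rfn
#   >>> a = np.array([(1,), (2,)], dtype=[('a', int)])
#   >>> rfn.append_fields(a, 'b', [10., 20.], usemask=False)
#   array([(1, 10.0), (2, 20.0)],
#         dtype=[('a', '<i8'), ('b', '<f8')])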
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
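# Sketch of the behaviour (hypothetical data): with
#   a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
#                   mask=[0, 0, 1, 0, 0, 0, 1]).view([('a', int)])
# find_duplicates(a, ignoremask=True) returns the records whose key value
# occurs more than once, skipping masked entries, and return_index=True
# additionally returns the indices of those records in the original array.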
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
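# Illustrative usage (hypothetical arrays; the integer width shown in the
# dtype is platform dependent):
#   >>> from numpy.lib import recfunctions as rfn
#   >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
#   ...              dtype=[('key', int), ('x', float)])
#   >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)],
#   ...              dtype=[('key', int), ('y', float)])
#   >>> rfn.join_by('key', a, b, jointype='inner', usemask=False)
#   array([(2, 20.0, 200.0), (3, 30.0, 300.0)],
#         dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])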
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| gpl-3.0 |
natsutan/cocytus | tools/cqt_diff/cqt_diff_yolo.py | 1 | 2396 | import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import sys
keras_dir = '../../example/tiny-yolo/keras/output/'
cqt_dir = '../../example/tiny-yolo/c_sdsoc/output/'
qp_file = '../../example/tiny-yolo/c_sdsoc/weight/'
fix16mode = False
def layer_dump(i, q, fnum = 3):
"""
Compare the Keras output and the Cocytus output of the layer specified
by the arguments and dump the comparison to images. Filters from the
first one up to fnum are compared.
The images are written to the output directory.
:param i:int layer number
:param q:int Q position of the output data
:param fnum:int number of filters to render as images
:return:
"""
for f in range(fnum):
plt.figure()
graph_name = 'l%02d_%d' % (i, f)
kname = os.path.join(keras_dir+'l%02d_%d.npy' % (i, f))
cname = os.path.join(cqt_dir+'l%02d.npy' % i)
k_data = np.load(kname).flatten()
c_data = np.load(cname)
c_data_tmp = c_data[f,:,:]
c_data_f = c_data_tmp.flatten()
diff_x = k_data - c_data_f
if fix16mode:
c_data = c_data.astype(np.float32) / (2 ** q)
x = np.arange(len(k_data))
plt.plot(x, k_data, color='b', label='Keras')
plt.plot(x, c_data_f, color='r', label='Cocytus')
plt.title(graph_name)
plt.legend()
img_fname = os.path.join('output', graph_name+'.png')
print('save %s' % img_fname)
plt.savefig(img_fname)
s, e = 0, 1000
plt.figure()
plt.plot(x[s:e], diff_x[s:e], color='g', label='diff')
plt.title(graph_name+'diff')
plt.legend()
img_fname = os.path.join('output', graph_name + '_diff.png')
plt.savefig(img_fname)
def read_qpfile(odir):
"""qpファイルを読み込み、入力、出力、重みのQ位置をリストにして返す"""
iqs = []
wqs = []
oqs = []
fname = os.path.join(odir, 'qp.txt')
for i, l in enumerate(open(fname).readlines()):
if i < 1:
continue
words = l.split(',')
iqs.append(int(words[0]))
oqs.append(int(words[1]))
wqs.append(int(words[2]))
return iqs, oqs, wqs
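# Expected qp.txt layout as implied by the parser above (a sketch; the
# header text itself is arbitrary): one header line, then one
# "input_q, output_q, weight_q" row per layer, e.g.
#   iq, oq, wq
#   4, 5, 6
#   3, 4, 7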
iqs, oqs, wqs = read_qpfile(qp_file)
#for i in range(31):
# layer_dump(i, oqs[i])
#layer_dump(0, oqs[0])
layer_dump(2, oqs[0])
print('finish')
| mit |
mattilyra/scikit-learn | sklearn/linear_model/logistic.py | 7 | 67572 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept (0. if the intercept is not fit).
yz : float
y * (np.dot(X, w) + c).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
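# In symbols, with sigma the logistic function and s_i the sample weights:
#   loss(w, c) = -sum_i s_i * log(sigma(y_i * (x_i . w + c)))
#                + 0.5 * alpha * ||w||^2
#   dloss/dw   = X.T @ (s * (sigma(y * z) - 1) * y) + alpha * w,  z = X @ w + c
# which is what the code above evaluates via log_logistic and expit.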
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
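# The returned Hs callable applies the regularized Hessian without ever
# forming it explicitly:
#   Hs(s) = X.T @ (d * (X @ s)) + alpha * s,  with d_i = s_i * p_i * (1 - p_i)
# and p_i = sigma(y_i * z_i); the extra terms handle the intercept column
# when fit_intercept is True.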
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
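# In symbols, with W the (n_classes, n_features) weight matrix and b the
# intercepts:
#   p_i  = softmax(x_i @ W.T + b)                    (row-wise probabilities)
#   loss = -sum_i s_i * sum_k Y_ik * log(p_ik) + 0.5 * alpha * ||W||_F^2
# The log-probabilities are computed through logsumexp for numerical
# stability before being exponentiated.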
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
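# Illustrative call (hypothetical X, y with labels {0, 1}):
#   coefs, Cs, n_iter = logistic_regression_path(
#       X, y, pos_class=1, Cs=[0.01, 0.1, 1.0], solver='lbfgs',
#       fit_intercept=True)
#   # coefs[i] has shape (n_features + 1,) and corresponds to Cs[i];
#   # n_iter[i] is the number of iterations the solver actually performed.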
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iterations for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
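    Examples
    --------
    A minimal usage sketch, for illustration only: it assumes this estimator
    is importable as ``sklearn.linear_model.LogisticRegression`` and that the
    bundled iris dataset is available; the parameter values are arbitrary.
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegression
    >>> iris = load_iris()
    >>> clf = LogisticRegression(C=1.0, class_weight='balanced')
    >>> clf = clf.fit(iris.data, iris.target)
    >>> predictions = clf.predict(iris.data[:5])
    >>> probabilities = clf.predict_proba(iris.data[:5])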
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
        For a multi_class problem, if multi_class is set to "multinomial",
        the softmax function is used to find the predicted probability of
        each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
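        Examples
        --------
        A short, self-contained sketch (illustrative only; it assumes the
        bundled iris dataset and the ``sklearn.linear_model`` import path).
        >>> import numpy as np
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> iris = load_iris()
        >>> clf = LogisticRegression().fit(iris.data, iris.target)
        >>> probabilities = clf.predict_proba(iris.data[:5])
        >>> rows_sum_to_one = bool(np.allclose(probabilities.sum(axis=1), 1.0))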
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of the newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, so it is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing one-vs-rest in parallel across all
    folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
        `coef_` is a read-only property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
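    Examples
    --------
    A minimal, illustrative sketch: it assumes this estimator is importable
    as ``sklearn.linear_model.LogisticRegressionCV``; the parameter values
    are arbitrary.
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegressionCV
    >>> iris = load_iris()
    >>> clf = LogisticRegressionCV(Cs=5, cv=3)
    >>> clf = clf.fit(iris.data, iris.target)
    >>> chosen_C_per_class = clf.C_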
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
soylentdeen/BlurryApple | Disturbances/calc_TF.py | 1 | 1679 | import scipy
import pyfits
import numpy
import matplotlib.pyplot as pyplot
from scipy.optimize import leastsq as lsq
fig = pyplot.figure(0)
fig.clear()
df = '/home/deen/Data/GRAVITY/Disturbance/disturb_0_0.1_open/disturb_0_0.1_open.fits'
CMf = '/home/deen/Code/Python/BlurryApple/Control/Output/HODM_CM20.fits'
data = pyfits.getdata(df)
CM = pyfits.getdata(CMf)
CM = CM[0:60]
frames = data.field(0)
times = data.field(1)+1e-6*data.field(2)
grad = data.field(4)
hodm = data.field(5)
counter = numpy.array(range(len(times)-1))
avg_dt = times[counter+1] - times[counter]
print numpy.mean(avg_dt)
d = numpy.mean(avg_dt)
projections = []
for g in grad:
projections.append(CM.dot(g))
projections = numpy.array(projections)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#ax.plot(projections[:,0])
#ax.plot(hodm[:,0])
#ax.scatter(hodm[0:400,0], projections[1:401,0])
sig_H = hodm[0:400,0]
sig_P = projections[0:400,0]
FFT_H = numpy.fft.fft(sig_H)
FFT_P = numpy.fft.fft(sig_P)
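# Empirical transfer function estimate: element-wise ratio of the output
# spectrum (gradients projected through the control matrix) to the input
# spectrum (the HODM commands).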
ratio = FFT_P/FFT_H
ratio_freq = numpy.fft.fftfreq(len(sig_H), d=d)
#phase = numpy.arctan(ratio)
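# arctan2 on the imaginary and real parts recovers the phase of the complex
# ratio over the full (-pi, pi] range, avoiding the quadrant ambiguity of the
# plain arctan commented out above.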
phase = numpy.arctan2(numpy.imag(ratio), numpy.real(ratio))
freqs = scipy.where( (ratio_freq > 0) & (ratio_freq < 20.0))[0]
guess = [-1.0]
def calc_error(g, xvals, yvals):
e = 0.0;
for x, y in zip(xvals, yvals):
e+= (x*g[0] - y)**2.0
return e
fit = lsq(calc_error, guess, args=(ratio_freq[freqs], phase[freqs]))
line = []
for i in ratio_freq[freqs]:
line.append(i*fit[0][0])
ax.plot(ratio_freq[freqs], phase[freqs])
ax.plot(ratio_freq[freqs], line)
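# For a pure time delay tau the phase of the transfer function is -2*pi*f*tau,
# so the slope of the fitted phase-vs-frequency line gives the latency as
# slope / (2*pi); the sign is dropped with abs() below.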
latency = fit[0][0]/(2.0*3.14159)
print "Latency : ", numpy.abs(latency)
#ax.plot(ratio_freq, numpy.abs(ratio))
fig.show()
| gpl-2.0 |
longzhi/Zappa | tests/tests.py | 1 | 71351 | # -*- coding: utf8 -*-
import base64
import collections
import json
from io import BytesIO, StringIO
import flask
import mock
import os
import random
import string
import zipfile
import re
import unittest
import shutil
import sys
import tempfile
if sys.version_info[0] < 3:
from contextlib import nested
from cStringIO import StringIO as OldStringIO
from builtins import bytes
from past.builtins import basestring
from click.exceptions import ClickException
from lambda_packages import lambda_packages
from .utils import placebo_session, patch_open
from zappa.cli import ZappaCLI, shamelessly_promote
from zappa.ext.django_zappa import get_django_wsgi
from zappa.handler import LambdaHandler, lambda_handler
from zappa.letsencrypt import get_cert_and_update_domain, create_domain_key, create_domain_csr, create_chained_certificate, get_cert, cleanup, parse_account_key, parse_csr, sign_certificate, encode_certificate, register_account, verify_challenge
from zappa.utilities import (detect_django_settings, copytree, detect_flask_apps,
add_event_source, remove_event_source,
get_event_source_status, parse_s3_url, human_size, string_to_timestamp,
validate_name, InvalidAwsLambdaName, contains_python_files_or_subdirs,
get_venv_from_python_version)
from zappa.wsgi import create_wsgi_request, common_log
from zappa.core import Zappa, ASSUME_POLICY, ATTACH_POLICY
def random_string(length):
return ''.join(random.choice(string.printable) for _ in range(length))
class TestZappa(unittest.TestCase):
def setUp(self):
self.sleep_patch = mock.patch('time.sleep', return_value=None)
# Tests expect us-east-1.
# If the user has set a different region in env variables, we set it aside for now and use us-east-1
self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None)
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.start()
def tearDown(self):
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.stop()
del os.environ['AWS_DEFAULT_REGION']
if self.users_current_region_name is not None:
# Give the user their AWS region back, we're done testing with us-east-1.
os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name
##
# Sanity Tests
##
def test_test(self):
self.assertTrue(True)
##
# Basic Tests
##
def test_zappa(self):
self.assertTrue(True)
Zappa()
# @mock.patch('zappa.zappa.find_packages')
# @mock.patch('os.remove')
# def test_copy_editable_packages(self, mock_remove, mock_find_packages):
# temp_package_dir = '/var/folders/rn/9tj3_p0n1ln4q4jn1lgqy4br0000gn/T/1480455339'
# egg_links = [
# '/user/test/.virtualenvs/test/lib/' + get_venv_from_python_version() + '/site-packages/package-python.egg-link'
# ]
# egg_path = "/some/other/directory/package"
# mock_find_packages.return_value = ["package", "package.subpackage", "package.another"]
# temp_egg_link = os.path.join(temp_package_dir, 'package-python.egg-link')
# if sys.version_info[0] < 3:
# z = Zappa()
# with nested(
# patch_open(), mock.patch('glob.glob'), mock.patch('zappa.zappa.copytree')
# ) as ((mock_open, mock_file), mock_glob, mock_copytree):
# # We read in the contents of the egg-link file
# mock_file.read.return_value = "{}\n.".format(egg_path)
# # we use glob.glob to get the egg-links in the temp packages directory
# mock_glob.return_value = [temp_egg_link]
# z.copy_editable_packages(egg_links, temp_package_dir)
# # make sure we copied the right directories
# mock_copytree.assert_called_with(
# os.path.join(egg_path, 'package'),
# os.path.join(temp_package_dir, 'package'),
# symlinks=False
# )
# self.assertEqual(mock_copytree.call_count, 1)
# # make sure it removes the egg-link from the temp packages directory
# mock_remove.assert_called_with(temp_egg_link)
# self.assertEqual(mock_remove.call_count, 1)
def test_create_lambda_package(self):
# mock the pip.get_installed_distributions() to include a package in lambda_packages so that the code
# for zipping pre-compiled packages gets called
mock_named_tuple = collections.namedtuple('mock_named_tuple', ['project_name', 'location'])
mock_return_val = [mock_named_tuple(list(lambda_packages.keys())[0], '/path')] # choose name of 1st package in lambda_packages
with mock.patch('pip.get_installed_distributions', return_value=mock_return_val):
z = Zappa()
path = z.create_lambda_zip(handler_file=os.path.realpath(__file__))
self.assertTrue(os.path.isfile(path))
os.remove(path)
def test_get_manylinux_python27(self):
z = Zappa(runtime='python2.7')
self.assertNotEqual(z.get_manylinux_wheel('pandas'), None)
self.assertEqual(z.get_manylinux_wheel('derpderpderpderp'), None)
# mock the pip.get_installed_distributions() to include a package in manylinux so that the code
# for zipping pre-compiled packages gets called
mock_named_tuple = collections.namedtuple('mock_named_tuple', ['project_name', 'location'])
mock_return_val = [mock_named_tuple('pandas', '/path')]
with mock.patch('pip.get_installed_distributions', return_value=mock_return_val):
z = Zappa()
path = z.create_lambda_zip(handler_file=os.path.realpath(__file__))
self.assertTrue(os.path.isfile(path))
os.remove(path)
def test_get_manylinux_python36(self):
z = Zappa(runtime='python3.6')
self.assertNotEqual(z.get_manylinux_wheel('psycopg2'), None)
self.assertEqual(z.get_manylinux_wheel('derpderpderpderp'), None)
# mock the pip.get_installed_distributions() to include a package in manylinux so that the code
# for zipping pre-compiled packages gets called
mock_named_tuple = collections.namedtuple('mock_named_tuple', ['project_name', 'location'])
mock_return_val = [mock_named_tuple('psycopg2', '/path')]
with mock.patch('pip.get_installed_distributions', return_value=mock_return_val):
z = Zappa()
path = z.create_lambda_zip(handler_file=os.path.realpath(__file__))
self.assertTrue(os.path.isfile(path))
os.remove(path)
def test_load_credentials(self):
z = Zappa()
z.aws_region = 'us-east-1'
z.load_credentials()
self.assertEqual(z.boto_session.region_name, 'us-east-1')
self.assertEqual(z.aws_region, 'us-east-1')
z.aws_region = 'eu-west-1'
z.profile_name = 'default'
z.load_credentials()
self.assertEqual(z.boto_session.region_name, 'eu-west-1')
self.assertEqual(z.aws_region, 'eu-west-1')
creds = {
'AWS_ACCESS_KEY_ID': 'AK123',
'AWS_SECRET_ACCESS_KEY': 'JKL456',
'AWS_DEFAULT_REGION': 'us-west-1'
}
with mock.patch.dict('os.environ', creds):
z.aws_region = None
z.load_credentials()
loaded_creds = z.boto_session._session.get_credentials()
self.assertEqual(loaded_creds.access_key, 'AK123')
self.assertEqual(loaded_creds.secret_key, 'JKL456')
self.assertEqual(z.boto_session.region_name, 'us-west-1')
def test_create_api_gateway_routes_with_different_auth_methods(self):
z = Zappa()
z.parameter_depth = 1
z.integration_response_codes = [200]
z.method_response_codes = [200]
z.http_methods = ['GET']
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
lambda_arn = 'arn:aws:lambda:us-east-1:12345:function:helloworld'
# No auth at all
z.create_stack_template(lambda_arn, 'helloworld', False, False, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("NONE", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# IAM auth
z.create_stack_template(lambda_arn, 'helloworld', False, True, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# CORS with auth
z.create_stack_template(lambda_arn, 'helloworld', False, True, None, True)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["OPTIONS0"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["OPTIONS1"]["Properties"]["AuthorizationType"])
self.assertEqual("MOCK", parsable_template["Resources"]["OPTIONS0"]["Properties"]["Integration"]["Type"])
self.assertEqual("MOCK", parsable_template["Resources"]["OPTIONS1"]["Properties"]["Integration"]["Type"])
self.assertEqual("'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
parsable_template["Resources"]["OPTIONS0"]["Properties"]["Integration"]["IntegrationResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertEqual("'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
parsable_template["Resources"]["OPTIONS1"]["Properties"]["Integration"]["IntegrationResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertTrue(parsable_template["Resources"]["OPTIONS0"]["Properties"]["MethodResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertTrue(parsable_template["Resources"]["OPTIONS1"]["Properties"]["MethodResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# API Key auth
z.create_stack_template(lambda_arn, 'helloworld', True, True, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(True, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(True, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# Authorizer and IAM
authorizer = {
"function": "runapi.authorization.gateway_authorizer.evaluate_token",
"result_ttl": 300,
"token_header": "Authorization",
"validation_expression": "xxx"
}
z.create_stack_template(lambda_arn, 'helloworld', False, True, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
with self.assertRaises(KeyError):
parsable_template["Resources"]["Authorizer"]
# Authorizer with validation expression
invocations_uri = 'arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
z.create_stack_template(lambda_arn, 'helloworld', False, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("TOKEN", parsable_template["Resources"]["Authorizer"]["Properties"]["Type"])
self.assertEqual("ZappaAuthorizer", parsable_template["Resources"]["Authorizer"]["Properties"]["Name"])
self.assertEqual(300, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerResultTtlInSeconds"])
self.assertEqual(invocations_uri, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerUri"])
self.assertEqual(z.credentials_arn, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerCredentials"])
self.assertEqual("xxx", parsable_template["Resources"]["Authorizer"]["Properties"]["IdentityValidationExpression"])
# Authorizer without validation expression
authorizer.pop('validation_expression', None)
z.create_stack_template(lambda_arn, 'helloworld', False, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("TOKEN", parsable_template["Resources"]["Authorizer"]["Properties"]["Type"])
with self.assertRaises(KeyError):
parsable_template["Resources"]["Authorizer"]["Properties"]["IdentityValidationExpression"]
# Authorizer with arn
authorizer = {
"arn": "arn:aws:lambda:us-east-1:123456789012:function:my-function",
}
z.create_stack_template(lambda_arn, 'helloworld', False, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual('arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:my-function/invocations', parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerUri"])
def test_policy_json(self):
# ensure the policy docs are valid JSON
json.loads(ASSUME_POLICY)
json.loads(ATTACH_POLICY)
def test_schedule_events(self):
z = Zappa()
path = os.getcwd()
# z.schedule_events # TODO
##
# Logging
##
def test_logging(self):
"""
TODO
"""
Zappa()
##
# Mapping and pattern tests
# Deprecated
##
# def test_redirect_pattern(self):
# test_urls = [
# # a regular endpoint url
# 'https://asdf1234.execute-api.us-east-1.amazonaws.com/env/path/to/thing',
# # an external url (outside AWS)
# 'https://github.com/Miserlou/zappa/issues?q=is%3Aissue+is%3Aclosed',
# # a local url
# '/env/path/to/thing'
# ]
# for code in ['301', '302']:
# pattern = Zappa.selection_pattern(code)
# for url in test_urls:
# self.assertRegexpMatches(url, pattern)
# def test_b64_pattern(self):
# head = '\{"http_status": '
# for code in ['400', '401', '402', '403', '404', '500']:
# pattern = Zappa.selection_pattern(code)
# document = head + code + random_string(50)
# self.assertRegexpMatches(document, pattern)
# for bad_code in ['200', '301', '302']:
# document = base64.b64encode(head + bad_code + random_string(50))
# self.assertNotRegexpMatches(document, pattern)
# def test_200_pattern(self):
# pattern = Zappa.selection_pattern('200')
# self.assertEqual(pattern, '')
##
# WSGI
##
def test_wsgi_event(self):
## This is a pre-proxy+ event
# event = {
# "body": "",
# "headers": {
# "Via": "1.1 e604e934e9195aaf3e36195adbcb3e18.cloudfront.net (CloudFront)",
# "Accept-Language": "en-US,en;q=0.5",
# "Accept-Encoding": "gzip",
# "CloudFront-Is-SmartTV-Viewer": "false",
# "CloudFront-Forwarded-Proto": "https",
# "X-Forwarded-For": "109.81.209.118, 216.137.58.43",
# "CloudFront-Viewer-Country": "CZ",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "X-Forwarded-Proto": "https",
# "X-Amz-Cf-Id": "LZeP_TZxBgkDt56slNUr_H9CHu1Us5cqhmRSswOh1_3dEGpks5uW-g==",
# "CloudFront-Is-Tablet-Viewer": "false",
# "X-Forwarded-Port": "443",
# "CloudFront-Is-Mobile-Viewer": "false",
# "CloudFront-Is-Desktop-Viewer": "true",
# "Content-Type": "application/json"
# },
# "params": {
# "parameter_1": "asdf1",
# "parameter_2": "asdf2",
# },
# "method": "POST",
# "query": {
# "dead": "beef"
# }
# }
event = {
u'body': None,
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'GET',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'GET',
u'pathParameters': None,
u'headers': {
u'Via': u'1.1 6801928d54163af944bf854db8d5520e.cloudfront.net (CloudFront)',
u'Accept-Language': u'en-US,en;q=0.5',
u'Accept-Encoding': u'gzip, deflate, br',
u'CloudFront-Is-SmartTV-Viewer': u'false',
u'CloudFront-Forwarded-Proto': u'https',
u'X-Forwarded-For': u'50.191.225.98, 204.246.168.101',
u'CloudFront-Viewer-Country': u'US',
u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
u'Upgrade-Insecure-Requests': u'1',
u'Host': u'9itr2lba55.execute-api.us-east-1.amazonaws.com',
u'X-Forwarded-Proto': u'https',
u'X-Amz-Cf-Id': u'qgNdqKT0_3RMttu5KjUdnvHI3OKm1BWF8mGD2lX8_rVrJQhhp-MLDw==',
u'CloudFront-Is-Tablet-Viewer': u'false',
u'X-Forwarded-Port': u'443',
u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'CloudFront-Is-Mobile-Viewer': u'false',
u'CloudFront-Is-Desktop-Viewer': u'true',
},
u'stageVariables': None,
u'path': u'/',
}
request = create_wsgi_request(event)
# def test_wsgi_path_info(self):
# # Test no parameters (site.com/)
# event = {
# "body": {},
# "headers": {},
# "pathParameters": {},
# "path": u'/',
# "httpMethod": "GET",
# "queryStringParameters": {}
# }
# request = create_wsgi_request(event, trailing_slash=True)
# self.assertEqual("/", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False)
# self.assertEqual("/", request['PATH_INFO'])
# # Test parameters (site.com/asdf1/asdf2 or site.com/asdf1/asdf2/)
# event_asdf2 = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'766df67f-8991-11e6-b2c4-d120fedb94e5', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 b2aeb492548a8a2d4036401355f928dd.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.50', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'X-Amz-Cf-Id': u'BBFP-RhGDrQGOzoCqjnfB2I_YzWt_dac9S5vBcSAEaoM4NfYhAQy7Q==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2'}
# event_asdf2_slash = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'd6fda925-8991-11e6-8bd8-b5ec6db19d57', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 c70173a50d0076c99b5e680eb32d40bb.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.53', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'X-Amz-Cf-Id': u'aU_i-iuT3llVUfXv2zv6uU-m77Oga7ANhd5ZYrCoqXBy4K7I2x3FZQ==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2/'}
# request = create_wsgi_request(event, trailing_slash=True)
# self.assertEqual("/asdf1/asdf2/", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False)
# self.assertEqual("/asdf1/asdf2", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False, script_name='asdf1')
# self.assertEqual("/asdf1/asdf2", request['PATH_INFO'])
def test_wsgi_path_info_unquoted(self):
event = {
"body": {},
"headers": {},
"pathParameters": {},
"path": '/path%3A1', # encoded /path:1
"httpMethod": "GET",
"queryStringParameters": {},
"requestContext": {}
}
request = create_wsgi_request(event, trailing_slash=True)
self.assertEqual("/path:1", request['PATH_INFO'])
def test_wsgi_logging(self):
# event = {
# "body": {},
# "headers": {},
# "params": {
# "parameter_1": "asdf1",
# "parameter_2": "asdf2",
# },
# "httpMethod": "GET",
# "query": {}
# }
event = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'766df67f-8991-11e6-b2c4-d120fedb94e5', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 b2aeb492548a8a2d4036401355f928dd.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.50', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'X-Amz-Cf-Id': u'BBFP-RhGDrQGOzoCqjnfB2I_YzWt_dac9S5vBcSAEaoM4NfYhAQy7Q==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2'}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
le = common_log(environ, response, response_time=True)
le = common_log(environ, response, response_time=False)
def test_wsgi_multipart(self):
#event = {u'body': u'LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS03Njk1MjI4NDg0Njc4MTc2NTgwNjMwOTYxDQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9Im15c3RyaW5nIg0KDQpkZGQNCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tNzY5NTIyODQ4NDY3ODE3NjU4MDYzMDk2MS0tDQo=', u'headers': {u'Content-Type': u'multipart/form-data; boundary=---------------------------7695228484678176580630961', u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'}, u'params': {u'parameter_1': u'post'}, u'method': u'POST', u'query': {}}
event = {
u'body': u'LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS03Njk1MjI4NDg0Njc4MTc2NTgwNjMwOTYxDQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9Im15c3RyaW5nIg0KDQpkZGQNCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tNzY5NTIyODQ4NDY3ODE3NjU4MDYzMDk2MS0tDQo=',
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'POST',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'POST',
u'pathParameters': None,
u'headers': {u'Content-Type': u'multipart/form-data; boundary=---------------------------7695228484678176580630961', u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'},
u'stageVariables': None,
u'path': u'/',
}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
def test_wsgi_without_body(self):
event = {
u'body': None,
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'POST',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'POST',
u'pathParameters': None,
u'headers': {u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'},
u'stageVariables': None,
u'path': u'/',
u'isBase64Encoded': True
}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
def test_wsgi_from_apigateway_testbutton(self):
"""
API Gateway resources have a "test bolt" button on methods.
This button sends some empty dicts as 'null' instead of '{}'.
"""
event = {
"resource": "/",
"path": "/",
"httpMethod": "GET",
"headers": None,
"queryStringParameters": None,
"pathParameters": None,
"stageVariables": None,
"requestContext":{
"accountId": "0123456",
"resourceId": "qwertyasdf",
"stage": "test-invoke-stage",
"requestId": "test-invoke-request",
"identity":{
"cognitoIdentityPoolId": None,
"accountId": "0123456",
"cognitoIdentityId": None,
"caller": "MYCALLERID",
"apiKey": "test-invoke-api-key",
"sourceIp": "test-invoke-source-ip",
"accessKey": "MYACCESSKEY",
"cognitoAuthenticationType": None,
"cognitoAuthenticationProvider": None,
"userArn": "arn:aws:iam::fooo:user/my.username",
"userAgent": "Apache-HttpClient/4.5.x (Java/1.8.0_112)",
"user": "MYCALLERID"
},
"resourcePath": "/",
"httpMethod": "GET",
"apiId": "myappid"
},
"body": None,
"isBase64Encoded": False
}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
##
# Handler
##
##
# CLI
##
def test_cli_sanity(self):
zappa_cli = ZappaCLI()
return
def test_load_settings(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
self.assertEqual(False, zappa_cli.stage_config['touch'])
def test_load_extended_settings(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda', zappa_cli.stage_config['s3_bucket'])
self.assertEqual(True, zappa_cli.stage_config['touch'])
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendofail'
with self.assertRaises(ClickException):
zappa_cli.load_settings('test_settings.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
with self.assertRaises(RuntimeError):
zappa_cli.load_settings('tests/test_bad_circular_extends_settings.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo2'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda2', zappa_cli.stage_config['s3_bucket']) # Second Extension
self.assertTrue(zappa_cli.stage_config['touch']) # First Extension
self.assertTrue(zappa_cli.stage_config['delete_local_zip']) # The base
def test_load_settings_yaml(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('tests/test_settings.yml')
self.assertEqual(False, zappa_cli.stage_config['touch'])
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo'
zappa_cli.load_settings('tests/test_settings.yml')
self.assertEqual('lmbda', zappa_cli.stage_config['s3_bucket'])
self.assertEqual(True, zappa_cli.stage_config['touch'])
def test_load_settings_toml(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('tests/test_settings.toml')
self.assertEqual(False, zappa_cli.stage_config['touch'])
def test_settings_extension(self):
"""
Make sure Zappa uses settings in the proper order: JSON, TOML, YAML.
"""
tempdir = tempfile.mkdtemp(prefix="zappa-test-settings")
shutil.copy("tests/test_one_env.json", tempdir + "/zappa_settings.json")
shutil.copy("tests/test_settings.yml", tempdir + "/zappa_settings.yml")
shutil.copy("tests/test_settings.toml", tempdir + "/zappa_settings.toml")
orig_cwd = os.getcwd()
os.chdir(tempdir)
try:
zappa_cli = ZappaCLI()
# With all three, we should get the JSON file first.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.json")
zappa_cli.load_settings_file()
self.assertIn("lonely", zappa_cli.zappa_settings)
os.unlink("zappa_settings.json")
# Without the JSON file, we should get the TOML file.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.toml")
zappa_cli.load_settings_file()
self.assertIn("ttt888", zappa_cli.zappa_settings)
self.assertNotIn("devor", zappa_cli.zappa_settings)
os.unlink("zappa_settings.toml")
# With just the YAML file, we should get it.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.yml")
zappa_cli.load_settings_file()
self.assertIn("ttt888", zappa_cli.zappa_settings)
self.assertIn("devor", zappa_cli.zappa_settings)
os.unlink("zappa_settings.yml")
# Without anything, we should get an exception.
self.assertRaises(
ClickException, zappa_cli.get_json_or_yaml_settings)
finally:
os.chdir(orig_cwd)
shutil.rmtree(tempdir)
def test_cli_utility(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
zappa_cli.create_package()
zappa_cli.remove_local_zip()
logs = [
{
'timestamp': '12345',
'message': '[START RequestId] test'
},
{
'timestamp': '12345',
'message': '[REPORT RequestId] test'
},
{
'timestamp': '12345',
'message': '[END RequestId] test'
},
{
'timestamp': '12345',
'message': 'test'
},
{
'timestamp': '1480001341214',
'message': '[INFO] 2016-11-24T15:29:13.326Z c0cb52d1-b25a-11e6-9b73-f940ce24319a 59.111.125.48 - - [24/Nov/2016:15:29:13 +0000] "GET / HTTP/1.1" 200 2590 "" "python-requests/2.11.0" 0/4.672'
},
{
'timestamp': '1480001341214',
'message': '[INFO] 2016-11-24T15:29:13.326Z c0cb52d1-b25a-11e6-9b73-f940ce24319a 59.111.125.48 - - [24/Nov/2016:15:29:13 +0000] "GET / HTTP/1.1" 400 2590 "" "python-requests/2.11.0" 0/4.672'
},
{
'timestamp': '1480001341215',
'message': '[1480001341258] [DEBUG] 2016-11-24T15:29:01.258Z b890d8f6-b25a-11e6-b6bc-718f7ec807df Zappa Event: {}'
}
]
zappa_cli.print_logs(logs)
zappa_cli.print_logs(logs, colorize=False)
zappa_cli.print_logs(logs, colorize=False, http=True)
zappa_cli.print_logs(logs, colorize=True, http=True)
zappa_cli.print_logs(logs, colorize=True, http=False)
zappa_cli.print_logs(logs, colorize=True, non_http=True)
zappa_cli.print_logs(logs, colorize=True, non_http=False)
zappa_cli.print_logs(logs, colorize=True, non_http=True, http=True)
zappa_cli.print_logs(logs, colorize=True, non_http=False, http=False)
zappa_cli.check_for_update()
def test_cli_args(self):
zappa_cli = ZappaCLI()
# Sanity
argv = '-s test_settings.json derp ttt888'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
def test_cli_error_exit_code(self):
# Discussion: https://github.com/Miserlou/Zappa/issues/407
zappa_cli = ZappaCLI()
# Sanity
argv = '-s test_settings.json status devor'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
def test_cli_default(self):
# Discussion: https://github.com/Miserlou/Zappa/issues/422
zappa_cli = ZappaCLI()
argv = '-s tests/test_one_env.json status'.split()
# It'll fail, but at least it'll cover it.
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
zappa_cli = ZappaCLI()
argv = '-s tests/test_one_env.json status --all'.split()
# It'll fail, but at least it'll cover it.
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
zappa_cli = ZappaCLI()
argv = '-s test_settings.json status'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
def test_cli_negative_rollback(self):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json rollback -n -1 dev'.split()
output = StringIO()
old_stderr, sys.stderr = sys.stderr, output
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
error_msg = output.getvalue().strip()
expected = r".*This argument must be positive \(got -1\)$"
self.assertRegexpMatches(error_msg, expected)
sys.stderr = old_stderr
@mock.patch('zappa.cli.ZappaCLI.dispatch_command')
def test_cli_invoke(self, _):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json invoke '.split()
raw_tests = (
['--raw', 'devor', '"print 1+2"'],
['devor', '"print 1+2"', '--raw']
)
for cmd in raw_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertTrue(args['raw'])
self.assertEquals(args['command_rest'], '"print 1+2"')
self.assertEquals(args['command_env'], 'devor')
all_raw_tests = (
['--all', '--raw', '"print 1+2"'],
['"print 1+2"', '--all', '--raw'],
['--raw', '"print 1+2"', '--all'],
['--all', '"print 1+2"', '--raw']
)
for cmd in all_raw_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertTrue(args['raw'])
self.assertEquals(args['command_rest'], '"print 1+2"')
self.assertEquals(args['command_env'], None)
zappa_cli.handle(argv + ['devor', 'myapp.my_func'])
args = zappa_cli.vargs
self.assertEquals(args['command_rest'], 'myapp.my_func')
all_func_tests = (
['--all', 'myapp.my_func'],
['myapp.my_func', '--all']
)
for cmd in all_func_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertEquals(args['command_rest'], 'myapp.my_func')
@mock.patch('zappa.cli.ZappaCLI.dispatch_command')
def test_cli_manage(self, _):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json manage '.split()
all_tests = (
['--all', 'showmigrations', 'admin'],
['showmigrations', 'admin', '--all']
)
for cmd in all_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertTrue(
args['command_rest'] == ['showmigrations', 'admin']
)
cmd = ['devor', 'showmigrations', 'admin']
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertTrue(
args['command_rest'] == ['showmigrations', 'admin']
)
cmd = ['devor', '"shell --version"']
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertTrue(args['command_rest'] == ['"shell --version"'])
def test_bad_json_catch(self):
zappa_cli = ZappaCLI()
self.assertRaises(ValueError, zappa_cli.load_settings_file, 'tests/test_bad_settings.json')
def test_bad_stage_name_catch(self):
zappa_cli = ZappaCLI()
self.assertRaises(ValueError, zappa_cli.load_settings, 'tests/test_bad_stage_name_settings.json')
def test_bad_environment_vars_catch(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
self.assertRaises(ValueError, zappa_cli.load_settings, 'tests/test_bad_environment_vars.json')
def test_function_sanity_check(self):
zappa_cli = ZappaCLI()
self.assertRaises(ClickException, zappa_cli.function_sanity_check, 'not_a_module.foo')
self.assertRaises(ClickException, zappa_cli.function_sanity_check, 'tests.test_app.not_a_function')
self.assertRaises(ClickException, zappa_cli.load_settings, 'tests/test_bad_module_paths.json')
# @mock.patch('botocore.session.Session.full_config', new_callable=mock.PropertyMock)
# def test_cli_init(self, mock_config):
# # Coverage for all profile detection paths
# mock_config.side_effect = [
# { 'profiles' : { 'default' : { 'region' : 'us-east-1'} } },
# { 'profiles' : { 'default' : { 'region' : 'us-east-1'} } },
# { 'profiles' : {
# 'default' : {
# 'region' : 'us-east-1'
# },
# 'another' : {
# 'region' : 'us-east-1'
# }
# } },
# { 'profiles' : {
# 'radical' : {
# 'region' : 'us-east-1'
# },
# 'another' : {
# 'region' : 'us-east-1'
# }
# } },
# { 'profiles': {} },
# { 'profiles': {} },
# { 'profiles' : { 'default' : { 'region' : 'us-east-1'} } },
# ]
# if os.path.isfile('zappa_settings.json'):
# os.remove('zappa_settings.json')
# # Test directly
# zappa_cli = ZappaCLI()
# # Via http://stackoverflow.com/questions/2617057/how-to-supply-stdin-files-and-environment-variable-inputs-to-python-unit-tests
# inputs = ['dev', 'lmbda', 'test_settings', 'y', '']
# def test_for(inputs):
# input_generator = (i for i in inputs)
# bi = 'builtins.input'
# with mock.patch(bi, lambda prompt: next(input_generator)):
# zappa_cli.init()
# if os.path.isfile('zappa_settings.json'):
# os.remove('zappa_settings.json')
# test_for(inputs)
# test_for(['dev', 'lmbda', 'test_settings', 'n', ''])
# test_for(['dev', 'default', 'lmbda', 'test_settings', '', ''])
# test_for(['dev', 'radical', 'lmbda', 'test_settings', 'p', ''])
# test_for(['dev', 'lmbda', 'test_settings', 'y', ''])
# test_for(['dev', 'lmbda', 'test_settings', 'p', 'n'])
# # Test via handle()
# input_generator = (i for i in inputs)
# bi = 'builtins.input'
# with mock.patch(bi, lambda prompt: next(input_generator)):
# zappa_cli = ZappaCLI()
# argv = ['init']
# zappa_cli.handle(argv)
# if os.path.isfile('zappa_settings.json'):
# os.remove('zappa_settings.json')
def test_domain_name_match(self):
# Simple sanity check
zone = Zappa.get_best_match_zone(all_zones={ 'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-correct',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.example.com.au')
assert zone == 'zone-correct'
# No match test
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-incorrect',
'Config': {
'PrivateZone': False
}
}
]},
domain='something-else.com.au')
assert zone is None
# More involved, better match should win.
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-incorrect',
'Config': {
'PrivateZone': False
}
},
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-correct',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.subdomain.example.com.au')
assert zone == 'zone-correct'
# Check private zone is not matched
zone = Zappa.get_best_match_zone(all_zones={ 'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-private',
'Config': {
'PrivateZone': True
}
}
]},
domain='www.example.com.au')
assert zone is None
# More involved, should ignore the private zone and match the public.
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-private',
'Config': {
'PrivateZone': True
}
},
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-public',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.subdomain.example.com.au')
assert zone == 'zone-public'
##
# Let's Encrypt / ACME
##
def test_lets_encrypt_sanity(self):
# We need a fake account key and crt
import subprocess
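# Use OpenSSL to create a throwaway RSA account key and a self-signed
# certificate under /tmp so the ACME helper functions exercised below
# have real key material to work with.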
proc = subprocess.Popen(["openssl genrsa 2048 > /tmp/account.key"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
proc = subprocess.Popen(["openssl req -x509 -newkey rsa:2048 -subj '/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com' -passout pass:foo -keyout /tmp/key.key -out test_signed.crt -days 1 > /tmp/signed.crt"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
CA = "https://acme-staging.api.letsencrypt.org"
try:
result = register_account()
except ValueError as e:
pass # that's fine.
create_domain_key()
create_domain_csr('herp.derp.wtf')
parse_account_key()
parse_csr()
create_chained_certificate()
try:
result = sign_certificate()
except ValueError as e:
pass # that's fine.
result = verify_challenge('http://echo.jsontest.com/status/valid')
try:
result = verify_challenge('http://echo.jsontest.com/status/fail')
except ValueError as e:
pass # that's fine.
try:
result = verify_challenge('http://bing.com')
except ValueError as e:
pass # that's fine.
encode_certificate(b'123')
# without domain testing..
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
get_cert_and_update_domain(zappa_cli, 'kerplah', 'zzzz', domain=None, clean_up=True)
os.remove('test_signed.crt')
cleanup()
def test_certify_sanity_checks(self):
"""
Make sure 'zappa certify':
* Writes a warning with the --no-cleanup flag.
* Errors out when a deployment hasn't taken place.
* Writes errors when certificate settings haven't been specified.
* Calls Zappa correctly for creates vs. updates.
"""
old_stdout = sys.stdout
if sys.version_info[0] < 3:
sys.stdout = OldStringIO() # print() barfs on io.* types.
try:
zappa_cli = ZappaCLI()
zappa_cli.domain = "test.example.com"
try:
zappa_cli.certify(no_cleanup=True)
except AttributeError:
# Since zappa_cli.zappa isn't initialized, the certify() call
# fails when it tries to inspect what Zappa has deployed.
pass
log_output = sys.stdout.getvalue()
self.assertIn("You are calling certify with", log_output)
self.assertIn("--no-cleanup", log_output)
class ZappaMock(object):
def __init__(self):
self.function_versions = []
self.domain_names = {}
self.calls = []
def get_lambda_function_versions(self, function_name):
return self.function_versions
def get_domain_name(self, domain):
return self.domain_names.get(domain)
def create_domain_name(self, *args, **kw):
self.calls.append(("create_domain_name", args, kw))
def update_route53_records(self, *args, **kw):
self.calls.append(("update_route53_records", args, kw))
def update_domain_name(self, *args, **kw):
self.calls.append(("update_domain_name", args, kw))
zappa_cli.zappa = ZappaMock()
self.assertRaises(ClickException, zappa_cli.certify)
# Make sure we get an error if we don't configure the domain.
zappa_cli.zappa.function_versions = ["$LATEST"]
zappa_cli.api_stage = "stage"
zappa_cli.zappa_settings = {"stage": {}}
zappa_cli.api_stage = "stage"
zappa_cli.domain = "test.example.com"
try:
zappa_cli.certify()
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("domain", log_output)
# Without any LetsEncrypt settings, we should get a message about
# not having a lets_encrypt_key setting.
zappa_cli.zappa_settings["stage"]["domain"] = "test.example.com"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("lets_encrypt_key", log_output)
# With partial settings, we should get a message about not having
# certificate, certificate_key, and certificate_chain
zappa_cli.zappa_settings["stage"]["certificate"] = "foo"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
zappa_cli.zappa_settings["stage"]["certificate_key"] = "key"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
zappa_cli.zappa_settings["stage"]["certificate_chain"] = "chain"
del zappa_cli.zappa_settings["stage"]["certificate_key"]
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
# With all certificate settings, make sure Zappa's domain calls
# are executed.
cert_file = tempfile.NamedTemporaryFile()
cert_file.write(b"Hello world")
cert_file.flush()
zappa_cli.zappa_settings["stage"].update({
"certificate": cert_file.name,
"certificate_key": cert_file.name,
"certificate_chain": cert_file.name
})
sys.stdout.truncate(0)
zappa_cli.certify(no_cleanup=True)
self.assertEquals(len(zappa_cli.zappa.calls), 2)
self.assertTrue(zappa_cli.zappa.calls[0][0] == "create_domain_name")
self.assertTrue(zappa_cli.zappa.calls[1][0] == "update_route53_records")
log_output = sys.stdout.getvalue()
self.assertIn("Created a new domain name", log_output)
zappa_cli.zappa.calls = []
zappa_cli.zappa.domain_names["test.example.com"] = "*.example.com"
sys.stdout.truncate(0)
zappa_cli.certify(no_cleanup=True)
self.assertEquals(len(zappa_cli.zappa.calls), 1)
self.assertTrue(zappa_cli.zappa.calls[0][0] == "update_domain_name")
log_output = sys.stdout.getvalue()
self.assertNotIn("Created a new domain name", log_output)
# Test creating domain without Route53
zappa_cli.zappa_settings["stage"].update({
"route53_enabled": False,
})
zappa_cli.zappa.calls = []
zappa_cli.zappa.domain_names["test.example.com"] = ""
sys.stdout.truncate(0)
zappa_cli.certify(no_cleanup=True)
self.assertEquals(len(zappa_cli.zappa.calls), 1)
self.assertTrue(zappa_cli.zappa.calls[0][0] == "create_domain_name")
log_output = sys.stdout.getvalue()
self.assertIn("Created a new domain name", log_output)
finally:
sys.stdout = old_stdout
##
# Django
##
def test_detect_dj(self):
# Sanity
settings_modules = detect_django_settings()
def test_dj_wsgi(self):
# Sanity
settings_modules = detect_django_settings()
settings = """
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'alskdfjalsdkf=0*%do-ayvy*m2k=vss*$7)j8q!@u0+d^na7mi2(^!l!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'blah.urls'
WSGI_APPLICATION = 'hackathon_starter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
"""
djts = open("dj_test_settings.py", "w")
djts.write(settings)
djts.close()
app = get_django_wsgi('dj_test_settings')
try:
os.remove('dj_test_settings.py')
os.remove('dj_test_settings.pyc')
except Exception as e:
pass
##
# Util / Misc
##
def test_human_units(self):
human_size(1)
human_size(9999999999999)
def test_string_to_timestamp(self):
boo = string_to_timestamp("asdf")
self.assertTrue(boo == 0)
yay = string_to_timestamp("1h")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("4m")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("1mm")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("1mm1w1d1h1m1s1ms1us")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
def test_event_name(self):
zappa = Zappa()
truncated = zappa.get_event_name("basldfkjalsdkfjalsdkfjaslkdfjalsdkfjadlsfkjasdlfkjasdlfkjasdflkjasdf-asdfasdfasdfasdfasdf", "this.is.my.dang.function.wassup.yeah.its.long")
self.assertTrue(len(truncated) <= 64)
self.assertTrue(truncated.endswith("this.is.my.dang.function.wassup.yeah.its.long"))
truncated = zappa.get_event_name("basldfkjalsdkfjalsdkfjaslkdfjalsdkfjadlsfkjasdlfkjasdlfkjasdflkjasdf-asdfasdfasdfasdfasdf", "thisidoasdfaljksdfalskdjfalsdkfjasldkfjalsdkfjalsdkfjalsdfkjalasdfasdfasdfasdklfjasldkfjalsdkjfaslkdfjasldkfjasdflkjdasfskdj")
self.assertTrue(len(truncated) <= 64)
truncated = zappa.get_event_name("a", "b")
self.assertTrue(len(truncated) <= 64)
self.assertEqual(truncated, "a-b")
def test_detect_dj(self):
# Sanity
settings_modules = detect_django_settings()
def test_detect_flask(self):
# Sanity
settings_modules = detect_flask_apps()
def test_shameless(self):
shamelessly_promote()
def test_s3_url_parser(self):
remote_bucket, remote_file = parse_s3_url('s3://my-project-config-files/filename.json')
self.assertEqual(remote_bucket, 'my-project-config-files')
self.assertEqual(remote_file, 'filename.json')
remote_bucket, remote_file = parse_s3_url('s3://your-bucket/account.key')
self.assertEqual(remote_bucket, 'your-bucket')
self.assertEqual(remote_file, 'account.key')
remote_bucket, remote_file = parse_s3_url('s3://my-config-bucket/super-secret-config.json')
self.assertEqual(remote_bucket, 'my-config-bucket')
self.assertEqual(remote_file, 'super-secret-config.json')
remote_bucket, remote_file = parse_s3_url('s3://your-secure-bucket/account.key')
self.assertEqual(remote_bucket, 'your-secure-bucket')
self.assertEqual(remote_file, 'account.key')
remote_bucket, remote_file = parse_s3_url('s3://your-bucket/subfolder/account.key')
self.assertEqual(remote_bucket, 'your-bucket')
self.assertEqual(remote_file, 'subfolder/account.key')
# Sad path
remote_bucket, remote_file = parse_s3_url('/dev/null')
self.assertEqual(remote_bucket, '')
def test_remote_env_package(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'depricated_remote_env'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda-env', zappa_cli.stage_config['remote_env_bucket'])
self.assertEqual('dev/env.json', zappa_cli.stage_config['remote_env_file'])
zappa_cli.create_package()
with zipfile.ZipFile(zappa_cli.zip_path, 'r') as lambda_zip:
content = lambda_zip.read('zappa_settings.py')
zappa_cli.remove_local_zip()
# m = re.search("REMOTE_ENV='(.*)'", content)
# self.assertEqual(m.group(1), 's3://lmbda-env/dev/env.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'remote_env'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('s3://lmbda-env/prod/env.json', zappa_cli.stage_config['remote_env'])
zappa_cli.create_package()
with zipfile.ZipFile(zappa_cli.zip_path, 'r') as lambda_zip:
content = lambda_zip.read('zappa_settings.py')
zappa_cli.remove_local_zip()
# m = re.search("REMOTE_ENV='(.*)'", content)
# self.assertEqual(m.group(1), 's3://lmbda-env/prod/env.json')
def test_package_only(self):
for delete_local_zip in [True, False]:
zappa_cli = ZappaCLI()
if delete_local_zip:
zappa_cli.api_stage = 'build_package_only_delete_local_zip_true'
else:
zappa_cli.api_stage = 'build_package_only_delete_local_zip_false'
zappa_cli.load_settings('test_settings.json')
zappa_cli.package()
zappa_cli.on_exit() # simulate the command exits
# the zip should never be removed
self.assertEqual(os.path.isfile(zappa_cli.zip_path), True)
# cleanup
os.remove(zappa_cli.zip_path)
def test_flask_logging_bug(self):
"""
This checks whether Flask can write errors sanely.
https://github.com/Miserlou/Zappa/issues/283
"""
event = {
"body": {},
"headers": {},
"pathParameters": {},
"path": '/',
"httpMethod": "GET",
"queryStringParameters": {},
"requestContext": {}
}
old_stderr = sys.stderr
sys.stderr = BytesIO()
try:
environ = create_wsgi_request(event)
app = flask.Flask(__name__)
with app.request_context(environ):
app.logger.error(u"This is a test")
log_output = sys.stderr.getvalue()
if sys.version_info[0] < 3:
self.assertNotIn(
"'str' object has no attribute 'write'", log_output)
self.assertNotIn(
"Logged from file tests.py", log_output)
finally:
sys.stderr = old_stderr
def test_slim_handler(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'slim_handler'
zappa_cli.load_settings('test_settings.json')
zappa_cli.create_package()
self.assertTrue(os.path.isfile(zappa_cli.handler_path))
self.assertTrue(os.path.isfile(zappa_cli.zip_path))
zappa_cli.remove_local_zip()
def test_validate_name(self):
fname = 'tests/name_scenarios.json'
with open(fname, 'r') as f:
scenarios = json.load(f)
for scenario in scenarios:
value = scenario["value"]
is_valid = scenario["is_valid"]
if is_valid:
assert validate_name(value)
else:
with self.assertRaises(InvalidAwsLambdaName) as exc:
validate_name(value)
def test_contains_python_files_or_subdirs(self):
files = ['foo.py']
dirs = []
self.assertTrue(contains_python_files_or_subdirs(dirs, files))
files = []
dirs = ['subfolder']
self.assertTrue(contains_python_files_or_subdirs(dirs, files))
files = ['somefile.txt']
dirs = []
self.assertFalse(contains_python_files_or_subdirs(dirs, files))
if __name__ == '__main__':
unittest.main()
| mit |
jlandmann/oggm | oggm/sandbox/run_alps.py | 2 | 3819 | """Run with a subset of benchmark glaciers"""
from __future__ import division
# Log message format
import logging
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
# Module logger
log = logging.getLogger(__name__)
# Python imports
import os
import shutil
from functools import partial
# Libs
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely.geometry as shpg
import matplotlib.pyplot as plt
# Locals
import oggm
import oggm.cfg as cfg
from oggm import workflow
from oggm.utils import get_demo_file
from oggm import tasks
from oggm.workflow import execute_entity_task
from oggm import graphics, utils
# Initialize OGGM
cfg.initialize()
# Local paths (where to write output and where to download input)
WORKING_DIR = '/work/ubuntu/run_alps/wd'
DATA_DIR = '/work/ubuntu/oggm-data'
PLOTS_DIR = '/work/ubuntu/run_alps/plots'
cfg.PATHS['working_dir'] = WORKING_DIR
cfg.PATHS['topo_dir'] = os.path.join(DATA_DIR, 'topo')
cfg.PATHS['rgi_dir'] = os.path.join(DATA_DIR, 'rgi')
# Climate file
hist_path = os.path.join(DATA_DIR, 'histalp_merged_with_cru_hydro_yrs.nc')
dl_hist_path = utils.aws_file_download('alps/histalp_merged_with_cru_hydro_yrs.nc')
shutil.copy(dl_hist_path, hist_path)
cfg.PATHS['cru_dir'] = '~'
cfg.PATHS['climate_file'] = hist_path
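# Point OGGM at the merged HISTALP/CRU climate file instead of the default
# CRU data; cru_dir is still set (and created below), presumably just to
# satisfy OGGM's directory checks.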
# Currently OGGM wants some directories to exist
# (maybe I'll change this but it can also catch errors in the user config)
utils.mkdir(cfg.PATHS['working_dir'])
utils.mkdir(cfg.PATHS['topo_dir'])
utils.mkdir(cfg.PATHS['cru_dir'])
utils.mkdir(cfg.PATHS['rgi_dir'])
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True
cfg.CONTINUE_ON_ERROR = False
# Run parameters
cfg.PARAMS['d1'] = 4
cfg.PARAMS['dmax'] = 100
cfg.PARAMS['border'] = 120
cfg.PARAMS['invert_with_sliding'] = False
cfg.PARAMS['min_slope'] = 2
cfg.PARAMS['max_shape_param'] = 0.006
cfg.PARAMS['max_thick_to_width_ratio'] = 0.5
cfg.PARAMS['temp_use_local_gradient'] = False
cfg.PARAMS['optimize_thick'] = True
cfg.PARAMS['force_one_flowline'] = ['RGI50-11.01270']
# Read in the Alps RGI file
rgi_pkl_path = utils.aws_file_download('alps/rgi_ref_alps.pkl')
rgidf = pd.read_pickle(rgi_pkl_path)
log.info('Number of glaciers: {}'.format(len(rgidf)))
# Go - initialize working directories
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# gdirs = workflow.init_glacier_regions(rgidf)
# Prepro tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.compute_downstream_lines,
tasks.catchment_area,
tasks.initialize_flowlines,
tasks.catchment_width_geom,
tasks.catchment_width_correction
]
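# execute_entity_task maps each preprocessing task over all glacier
# directories, in parallel when use_multiprocessing is enabled above.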
for task in task_list:
execute_entity_task(task, gdirs)
# Climate related task
execute_entity_task(tasks.process_custom_climate_data, gdirs)
tasks.compute_ref_t_stars(gdirs)
tasks.distribute_t_stars(gdirs)
# Inversion
execute_entity_task(tasks.prepare_for_inversion, gdirs)
tasks.optimize_inversion_params(gdirs)
execute_entity_task(tasks.volume_inversion, gdirs)
# Write out glacier statistics
df = utils.glacier_characteristics(gdirs)
fpath = os.path.join(cfg.PATHS['working_dir'], 'glacier_char.csv')
df.to_csv(fpath)
# Plots (if you want)
if PLOTS_DIR == '':
exit()
utils.mkdir(PLOTS_DIR)
for gd in gdirs:
bname = os.path.join(PLOTS_DIR, gd.rgi_id + '_')
# graphics.plot_googlemap(gd)
# plt.savefig(bname + 'ggl.png')
# plt.close()
# graphics.plot_domain(gd)
# plt.savefig(bname + 'dom.png')
# plt.close()
graphics.plot_centerlines(gd)
plt.savefig(bname + 'cls.png')
plt.close()
graphics.plot_catchment_width(gd, corrected=True)
plt.savefig(bname + 'w.png')
plt.close()
graphics.plot_inversion(gd)
plt.savefig(bname + 'inv.png')
plt.close()
| gpl-3.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/sphinxext/ipython_directive.py | 11 | 27706 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as pure
Python by giving the argument ``python`` to the directive. The output looks
like an interactive IPython session.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython output prompt in the generated ReST.
The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import io
import os
import re
import sys
import tempfile
import ast
from hashlib import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
matplotlib.cbook.warn_deprecated("1.4", """
The Sphinx extension ipython_console_highlighting has moved from
matplotlib to IPython, and its use in matplotlib is deprecated.
Change your import from 'matplotlib.sphinxext.ipython_directive' to
'IPython.sphinxext.ipython_directive.""")
# Our own
try:
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
except ImportError:
raise ImportError(
"Unable to import the necessary objects from IPython. "
"You may need to install or upgrade your IPython installation.")
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = list(xrange(3))
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# an input line can continue over several lines, e.g. when it ends
# with a '\' continuation or spans an open call, and it may be
# followed by echoed text (e.g. from 'print'). The input can only
# be terminated by the end of the block or an output line, so we
# collect the rest of the multiline input here, as well as any
# echoed text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = io.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
content is a list of strings; it is the unedited directive content.
This runs it line by line in the InteractiveShell, prepending
prompts as needed and capturing stderr and stdout, then returns
the content as a list as if it were IPython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = ' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = "%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append('')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = '%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append('')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True  # docutils expects this exact attribute name
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print('\n'.join(lines))
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| mit |
stulp/dmpbbo | demo_robot/step2_defineTask.py | 1 | 1989 | # This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2014 Freek Stulp, ENSTA-ParisTech
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import pickle
import inspect
lib_path = os.path.abspath('../python')
sys.path.append(lib_path)
from TaskThrowBall import TaskThrowBall
if __name__=="__main__":
output_task_file = None
if (len(sys.argv)<2):
print('Usage: '+sys.argv[0]+' <task file.p>')
print('Example: python3 '+sys.argv[0]+' results/task.p')
sys.exit()
if (len(sys.argv)>1):
output_task_file = sys.argv[1]
x_goal = -0.70
x_margin = 0.01
y_floor = -0.3
acceleration_weight = 0.001
task = TaskThrowBall(x_goal,x_margin,y_floor,acceleration_weight)
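# Assumed semantics of the parameters: x_goal/x_margin specify where the
# ball should land (and the tolerance), y_floor is the height at which it
# lands, and acceleration_weight trades off accuracy against effort in the
# cost computed by TaskThrowBall.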
# Save the task instance itself
print(' * Saving task to file "'+output_task_file+"'")
pickle.dump(task, open(output_task_file, "wb" ))
# Save the source code of the task for future reference
#src_task = inspect.getsourcelines(task.__class__)
#src_task = ' '.join(src_task[0])
#src_task = src_task.replace("(Task)", "")
#filename = directory+'/the_task.py'
#task_file = open(filename, "w")
#task_file.write(src_task)
#task_file.close()
| lgpl-2.1 |
IshankGulati/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
jmetzen/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
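# A minimal usage sketch for `benchmark`, assuming a small random matrix
# (the names `rng`, `toy_data` and `stats` are illustrative only):
#
#     rng = np.random.RandomState(0)
#     toy_data = rng.rand(200, 20)
#     stats = benchmark(PCA(n_components=5), toy_data)
#     # -> {'time': <seconds>, 'error': <mean absolute reconstruction error>}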
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to the first 5000 face images (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
fivejjs/pyhsmm-autoregressive | examples/demo.py | 1 | 1813 | from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
plt.ion()
np.random.seed(0)
import pyhsmm
from pyhsmm.util.text import progprint_xrange
from pyhsmm.util.stats import whiten, cov
import autoregressive.models as m
import autoregressive.distributions as d
###################
# generate data #
###################
As = [0.99*np.hstack((-np.eye(2),2*np.eye(2))),
np.array([[np.cos(np.pi/6),-np.sin(np.pi/6)],[np.sin(np.pi/6),np.cos(np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2))),
np.array([[np.cos(-np.pi/6),-np.sin(-np.pi/6)],[np.sin(-np.pi/6),np.cos(-np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2)))]
truemodel = m.ARHSMM(
alpha=4.,init_state_concentration=4.,
obs_distns=[d.AutoRegression(A=A,sigma=np.eye(2)) for A in As],
dur_distns=[pyhsmm.basic.distributions.PoissonDuration(alpha_0=4*25,beta_0=4)
for state in range(len(As))],
)
data, labels = truemodel.generate(500)
plt.figure()
plt.plot(data[:,0],data[:,1],'bx-')
plt.gcf().suptitle('data')
truemodel.plot()
plt.gcf().suptitle('truth')
##################
# create model #
##################
Nmax = 10
affine = True
nlags = 3
model = m.ARHMM(
alpha=4.,
init_state_distn='uniform',
obs_distns=[
d.AutoRegression(
nu_0=3,
S_0=np.eye(2),
M_0=np.zeros((2,2*nlags+affine)),
K_0=np.eye(2*nlags+affine),
affine=affine)
for state in range(Nmax)],
)
model.add_data(data)
###############
# inference #
###############
for itr in progprint_xrange(100):
model.resample_model()
model.plot()
plt.gcf().suptitle('sampled')
| gpl-2.0 |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/scatter/tests/test_scatter_viewer.py | 3 | 11133 | import numpy as np
from glue.core import DataCollection, Data
from glue.app.qt.application import GlueApplication
from glue.core.component import Component
from matplotlib import cm
from ..scatter_viewer import VispyScatterViewer
def make_test_data():
data = Data(label="Test Cat Data 1")
np.random.seed(12345)
for letter in 'abcdefxyz':
comp = Component(np.random.random(100))
data.add_component(comp, letter)
return data
def test_scatter_viewer(tmpdir):
# Create fake data
data = make_test_data()
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
scatter.viewer_size = (400, 500)
viewer_state = scatter.state
viewer_state.x_att = data.id['a']
viewer_state.y_att = data.id['f']
viewer_state.z_att = data.id['z']
viewer_state.x_stretch = 0.5
viewer_state.y_stretch = 1.0
viewer_state.z_stretch = 2.0
viewer_state.x_min = -0.1
viewer_state.x_max = 1.1
viewer_state.y_min = 0.1
viewer_state.y_max = 0.9
viewer_state.z_min = 0.2
viewer_state.z_max = 0.8
viewer_state.visible_axes = False
# Get layer artist style editor
layer_state = viewer_state.layers[0]
layer_state.size_attribute = data.id['c']
layer_state.size_mode = 'Linear'
layer_state.size_scaling = 2
layer_state.size_vmin = 0.2
layer_state.size_vmax = 0.8
layer_state.cmap_attribute = data.id['y']
layer_state.color_mode = 'Linear'
layer_state.cmap_vmin = 0.1
layer_state.cmap_vmax = 0.9
layer_state.cmap = cm.BuGn
# Check that writing a session works as expected.
session_file = tmpdir.join('test_scatter_viewer.glu').strpath
ga.save_session(session_file)
ga.close()
# Now we can check that everything is restored correctly
ga2 = GlueApplication.restore_session(session_file)
ga2.show()
scatter_r = ga2.viewers[0][0]
assert scatter_r.viewer_size == (400, 500)
viewer_state = scatter_r.state
assert viewer_state.x_att.label == 'a'
assert viewer_state.y_att.label == 'f'
assert viewer_state.z_att.label == 'z'
np.testing.assert_allclose(viewer_state.x_stretch, 0.5, rtol=1e-3)
np.testing.assert_allclose(viewer_state.y_stretch, 1.0, rtol=1e-3)
np.testing.assert_allclose(viewer_state.z_stretch, 2.0, rtol=1e-3)
assert viewer_state.x_min == -0.1
assert viewer_state.x_max == 1.1
assert viewer_state.y_min == 0.1
assert viewer_state.y_max == 0.9
assert viewer_state.z_min == 0.2
assert viewer_state.z_max == 0.8
assert not viewer_state.visible_axes
layer_state = viewer_state.layers[0]
assert layer_state.size_mode == 'Linear'
assert layer_state.size_attribute.label == 'c'
np.testing.assert_allclose(layer_state.size_scaling, 2, rtol=0.01)
assert layer_state.size_vmin == 0.2
assert layer_state.size_vmax == 0.8
assert layer_state.color_mode == 'Linear'
assert layer_state.cmap_attribute.label == 'y'
assert layer_state.cmap_vmin == 0.1
assert layer_state.cmap_vmax == 0.9
assert layer_state.cmap is cm.BuGn
ga2.close()
def test_error_bars(tmpdir):
# Create fake data
data = make_test_data()
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
scatter.viewer_size = (400, 500)
viewer_state = scatter.state
viewer_state.x_att = data.id['a']
viewer_state.y_att = data.id['f']
viewer_state.z_att = data.id['z']
layer_state = viewer_state.layers[0]
layer_state.xerr_visible = True
layer_state.xerr_attribute = data.id['b']
layer_state.yerr_visible = False
layer_state.yerr_attribute = data.id['c']
layer_state.zerr_visible = True
layer_state.zerr_attribute = data.id['d']
assert viewer_state.line_width == 1
# Check that writing a session works as expected.
session_file = tmpdir.join('test_error_bars.glu').strpath
ga.save_session(session_file)
ga.close()
# Now we can check that everything is restored correctly
ga2 = GlueApplication.restore_session(session_file)
ga2.show()
scatter_r = ga2.viewers[0][0]
layer_state = scatter_r.state.layers[0]
assert layer_state.xerr_visible
assert layer_state.xerr_attribute.label == 'b'
assert not layer_state.yerr_visible
assert layer_state.yerr_attribute.label == 'c'
assert layer_state.zerr_visible
assert layer_state.zerr_attribute.label == 'd'
assert scatter_r.state.line_width == 1
ga2.close()
def test_vectors(tmpdir):
# Create fake data
data = make_test_data()
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
scatter.viewer_size = (400, 500)
viewer_state = scatter.state
viewer_state.x_att = data.id['a']
viewer_state.y_att = data.id['f']
viewer_state.z_att = data.id['z']
layer_state = viewer_state.layers[0]
layer_state.vector_visible = True
layer_state.vx_attribute = data.id['x']
layer_state.vy_attribute = data.id['y']
layer_state.vz_attribute = data.id['e']
layer_state.vector_scaling = 0.1
layer_state.vector_origin = 'tail'
layer_state.vector_arrowhead = True
viewer_state.line_width = 3
# Check that writing a session works as expected.
session_file = tmpdir.join('test_vectors.glu').strpath
ga.save_session(session_file)
ga.close()
# Now we can check that everything is restored correctly
ga2 = GlueApplication.restore_session(session_file)
ga2.show()
scatter_r = ga2.viewers[0][0]
layer_state = scatter_r.state.layers[0]
assert layer_state.vector_visible
assert layer_state.vx_attribute.label == 'x'
assert layer_state.vy_attribute.label == 'y'
assert layer_state.vz_attribute.label == 'e'
assert np.isclose(layer_state.vector_scaling, 0.1)
assert layer_state.vector_origin == 'tail'
assert layer_state.vector_arrowhead
assert scatter_r.state.line_width == 3
ga2.close()
def test_n_dimensional_data():
# Create fake data
data = Data(x=np.random.random((2, 3, 4, 5)),
y=np.random.random((2, 3, 4, 5)),
z=np.random.random((2, 3, 4, 5)))
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
layer_artist = scatter.layers[0]
style_widget = scatter._view.layout_style_widgets[layer_artist]
style_widget.size_mode = 'Linear'
style_widget.size_attribute = data.id['x']
style_widget.color_mode = 'Linear'
style_widget.cmap_attribute = data.id['y']
style_widget.cmap = cm.BuGn
ga.close()
def test_scatter_remove_layer_artists(tmpdir):
# Regression test for a bug that caused layer states to not be removed
# when the matching layer artist was removed. This then caused issues when
# loading session files.
# Create fake data
data = make_test_data()
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
dc.new_subset_group(subset_state=data.id['x'] > 0.5, label='subset 1')
scatter.add_subset(data.subsets[0])
assert len(scatter.layers) == 2
assert len(scatter.state.layers) == 2
dc.remove_subset_group(dc.subset_groups[0])
assert len(scatter.layers) == 1
assert len(scatter.state.layers) == 1
# Check that writing a session works as expected.
session_file = tmpdir.join('test_scatter_viewer.glu').strpath
ga.save_session(session_file)
ga.close()
# Now we can check that everything is restored correctly
ga2 = GlueApplication.restore_session(session_file)
ga2.show()
ga2.close()
def test_add_data_with_incompatible_subsets(tmpdir):
    # Regression test for a bug that caused an error when adding a dataset with an
# incompatible subset to a 3D scatter viewer.
data1 = Data(label="Data 1", x=[1, 2, 3])
data2 = Data(label="Data 2", y=[4, 5, 6])
dc = DataCollection([data1, data2])
ga = GlueApplication(dc)
ga.show()
# Subset is defined in terms of data2, so it's an incompatible subset
# for data1
dc.new_subset_group(subset_state=data2.id['y'] > 0.5, label='subset 1')
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data1)
ga.close()
def test_not_all_points_inside_limits(tmpdir):
# Regression test for a bug that occurred when not all points were inside
# the visible limits and the color or size mode is linear.
data1 = Data(label="Data", x=[1, 2, 3])
dc = DataCollection([data1])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data1)
scatter.state.layers[0].color_mode = 'Linear'
scatter.state.layers[0].size_mode = 'Linear'
scatter.state.x_min = -0.1
scatter.state.x_max = 2.1
ga.close()
def test_categorical_color_size(tmpdir):
# Create fake data
data = make_test_data()
# Add categorical component
data['categorical'] = ['a', 'b'] * 50
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
viewer_state = scatter.state
viewer_state.x_att = data.id['a']
viewer_state.y_att = data.id['b']
viewer_state.z_att = data.id['z']
layer_state = viewer_state.layers[0]
layer_state.size_mode = 'Linear'
layer_state.size_attribute = data.id['categorical']
layer_state.color_mode = 'Linear'
layer_state.cmap_attribute = data.id['categorical']
ga.close()
def test_layer_visibility_after_session(tmpdir):
# Regression test for a bug that caused layers to be incorrectly visible
# after saving and loading a session file.
# Create fake data
data = make_test_data()
# Create fake session
dc = DataCollection([data])
ga = GlueApplication(dc)
ga.show()
scatter = ga.new_data_viewer(VispyScatterViewer)
scatter.add_data(data)
viewer_state = scatter.state
layer_state = viewer_state.layers[0]
layer_state.visible = False
session_file = tmpdir.join('test_layer_visibility.glu').strpath
ga.save_session(session_file)
ga.close()
ga2 = GlueApplication.restore_session(session_file)
ga2.show()
scatter_r = ga2.viewers[0][0]
viewer_state = scatter_r.state
layer_state = viewer_state.layers[0]
assert not layer_state.visible
# Make sure the multiscat layer is also not visible (this was where the bug was)
layer_artist = scatter_r.layers[0]
assert not layer_artist._multiscat.layers[layer_artist.id]['visible']
ga2.close()
| bsd-2-clause |
alkyl1978/gnuradio | gr-filter/examples/chirp_channelize.py | 58 | 7169 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
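# Flowgraph summary, as wired in pfb_top_block.__init__ above:
#   vco_input -> vco -> f2c -> head -> pfb -> snks[0..M-1]
#   f2c is also copied into snk_i so the channelizer input can be plotted.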
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
        # Trim the transients at the beginning and the corrupted samples at
        # the end of the stream; the end-of-stream corruption is a bug,
        # probably due to corner cases.
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Lucaszw/DIGITS | digits/layer_outputs/get_deconv.py | 1 | 2763 | import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import argparse
script_location = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.insert(0,script_location+"/../..")
import digits
from digits import utils
import argparse
from subprocess import call
import h5py
sys.path.insert(0,"/home/lzeerwanklyn/Projects/caffes/caffe-dvt/python")
# If the caffe import complains about missing CUDA libraries, run:
# sudo ldconfig /usr/local/cuda/lib64
os.environ['LD_LIBRARY_PATH'] = '/home/lzeerwanklyn/Projects/torches/torch-nv/install/lib:/home/lzeerwanklyn/Projects/nccl/build/lib:/usr/local/cuda-7.5/lib6'
import caffe
def write_deconv(job_path,image_key,layer_name, neuron_index):
delchars = ''.join(c for c in map(chr, range(256)) if not c.isalnum())
# TODO: This should not be hardcoded!
caffe.set_device(0)
caffe.set_mode_gpu()
deploy_prototxt = job_path+"/deploy.prototxt"
caffemodel = job_path+"/model.caffemodel"
# Load network
net = caffe.Net(deploy_prototxt,caffemodel,caffe.TEST)
# Setup data blob for input image
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_raw_scale('data', 255)
transformer.set_channel_swap('data', (2,1,0))
# Load image and perform a forward pass
f = h5py.File(job_path+'/activations.hdf5','r')
image = f[image_key]['data'][:][0]
transformed_image = transformer.preprocess('data', image)
net.blobs['data'].data[...] = transformed_image
net.forward()
# Get deconvolution for specified layer and neuron
diffs = net.blobs[layer_name].diff * 0
diffs[0][int(neuron_index)] = net.blobs[layer_name].data[0,int(neuron_index)]
net.deconv_from_layer(layer_name, diffs, zero_higher = True)
# Save data to numpy array
raw_data = net.blobs['data'].diff[0]
vis_data = utils.image.reshape_data_for_vis(raw_data,'BGR')[:10000]
np.save(script_location+"/deconv/"+layer_name.translate(None, delchars), utils.image.normalize_data(vis_data))
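# Example invocation (the paths and values are hypothetical; they match the
# positional arguments parsed below):
#
#     python get_deconv.py /path/to/job image_key conv1 5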
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Deconvolution tool - DIGITS')
### Positional arguments
parser.add_argument('job_path',
help='Path to job containing deploy.prototxt and caffemodel')
parser.add_argument('image_key',
help='Group key of dataset containing image in activations.hdf5')
parser.add_argument('layer_name',
help='Name of layer (string)')
parser.add_argument('neuron_index',
help='neuron index from 0 to N')
args = vars(parser.parse_args())
write_deconv(
args['job_path'],
args['image_key'],
args['layer_name'],
args['neuron_index'],
)
| bsd-3-clause |
mmp2/megaman | megaman/embedding/tests/test_lle.py | 4 | 4114 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import sys
import numpy as np
import scipy as sp
import scipy.sparse as sparse
from scipy.spatial.distance import squareform, pdist
from itertools import product
from numpy.testing import assert_array_almost_equal
from sklearn import manifold, datasets
from sklearn.neighbors import NearestNeighbors
import megaman.embedding.locally_linear as lle
import megaman.geometry.geometry as geom
from megaman.utils.eigendecomp import EIGEN_SOLVERS
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
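# A small sketch of what the helper accepts (illustrative values only):
#
#     A = np.array([[1., 2.], [3., 4.]])
#     B = np.array([[-1., 2.], [-3., 4.]])           # first column sign-flipped
#     _check_with_col_sign_flipping(A, B, tol=1e-8)  # -> True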
def test_lle_with_sklearn():
N = 10
X, color = datasets.samples_generator.make_s_curve(N, random_state=0)
n_components = 2
n_neighbors = 3
knn = NearestNeighbors(n_neighbors + 1).fit(X)
G = geom.Geometry()
G.set_data_matrix(X)
G.set_adjacency_matrix(knn.kneighbors_graph(X, mode = 'distance'))
sk_Y_lle = manifold.LocallyLinearEmbedding(n_neighbors, n_components, method = 'standard').fit_transform(X)
(mm_Y_lle, err) = lle.locally_linear_embedding(G, n_components)
assert(_check_with_col_sign_flipping(sk_Y_lle, mm_Y_lle, 0.05))
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
distance_matrix = squareform(pdist(X))
A = lle.barycenter_graph(distance_matrix, X)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert(np.linalg.norm(pred - X) / X.shape[0] < 1)
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 20 because the tests pass.
rng = np.random.RandomState(20)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
G = geom.Geometry(adjacency_kwds = {'radius':3})
G.set_data_matrix(X)
tol = 0.1
distance_matrix = G.compute_adjacency_matrix()
N = lle.barycenter_graph(distance_matrix, X).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X, 'fro')
assert(reconstruction_error < tol)
for eigen_solver in EIGEN_SOLVERS:
clf = lle.LocallyLinearEmbedding(n_components = n_components, geom = G,
eigen_solver = eigen_solver, random_state = rng)
clf.fit(X)
assert(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert(reconstruction_error < tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
G = geom.Geometry(adjacency_kwds = {'radius':3})
G.set_data_matrix(X)
distance_matrix = G.compute_adjacency_matrix()
tol = 1.5
N = lle.barycenter_graph(distance_matrix, X).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X)
assert(reconstruction_error < tol)
for eigen_solver in EIGEN_SOLVERS:
clf = lle.LocallyLinearEmbedding(n_components = n_components, geom = G,
eigen_solver = eigen_solver, random_state = rng)
clf.fit(X)
assert(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert(reconstruction_error < tol)
| bsd-2-clause |
qifeigit/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
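# Note on the scaling convention exercised above: with normalize=True the
# returned X_std is np.std(X, axis=0) * sqrt(n_samples), not the plain
# column standard deviation.  For example, a column [0., 2.] has std 1.0,
# so with n_samples=2 the reported value is sqrt(2), about 1.414.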
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
avistous/QSTK | Tools/Visualizer/FormatData.py | 3 | 3430 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on April, 20, 2012
@author: Sourabh Bajaj
@contact: sourabhbajaj90@gmail.com
@summary: Visualizer - Random Data Source - Weather Data
'''
#import libraries
import numpy as np
import datetime as dt
import pickle
import dircache
import os
import string
import datetime as dt
from pylab import *
from pandas import *
import qstkutil.tsutil as tsu
def genData():
op_folderpath = os.environ['QS'] + 'Tools/Visualizer/Data/Norway'
ip_folderpath = os.environ['QS'] + 'Tools/Visualizer/Data/Norway/Raw/'
if not os.path.exists(op_folderpath):
os.mkdir(op_folderpath)
print "Data was missing"
return
op_folderpath = op_folderpath + '/'
files_at_this_path = dircache.listdir(ip_folderpath)
ip_folderpath = ip_folderpath +'/'
stationnames = []
startyears = []
endyears=[]
for file1 in files_at_this_path:
file = open(ip_folderpath + file1, 'r')
for f in file.readlines():
if string.find(f, 'Name')!=-1:
n= string.lstrip(f, 'Name= ')
stationnames.append(string.rstrip(n))
if string.find(f, 'Start year')!=-1:
n= string.lstrip(f, 'Start year= ')
startyears.append(int(string.rstrip(n)))
if string.find(f, 'End year')!=-1:
n= string.lstrip(f, 'End year= ')
endyears.append(int(string.rstrip(n)))
file.close()
timestamps = [ dt.datetime(year,1,1) for year in range(min(startyears),max(endyears)+1)]
months = ['January','February','March','April','May','June','July','August','September','October','November','December']
numpyarray = np.empty([len(months),len(timestamps),len(stationnames)])
numpyarray[:] = np.NAN
PandasObject= Panel(numpyarray, items=months, major_axis=timestamps, minor_axis=stationnames)
for i, file1 in enumerate(files_at_this_path):
flag=0
station=stationnames[i]
file = open(ip_folderpath + file1, 'r')
for f in file.readlines():
if flag==1:
data=string.split(f)
year = int(data.pop(0))
time = dt.datetime(year,1,1)
for month,val in zip(months,data):
PandasObject[month][station][time] = float(val)
if string.find(f, 'Obs')!=-1:
flag=1
file.close()
#Creating a txt file of timestamps
file = open(op_folderpath +'TimeStamps.txt', 'w')
for onedate in timestamps:
stringdate=dt.date.isoformat(onedate)
file.write(stringdate+'\n')
file.close()
#Creating a txt file of symbols
file = open(op_folderpath +'Symbols.txt', 'w')
for sym in stationnames:
file.write(str(sym)+'\n')
file.close()
#Creating a txt file of Features
file = open(op_folderpath +'Features.txt', 'w')
for f in months:
file.write(f+'\n')
file.close()
Numpyarray_Final = PandasObject.values
for i,month in enumerate(months):
for j,station in enumerate(stationnames):
for k in range(len(timestamps)-1):
if np.isnan(Numpyarray_Final[i][k+1][j]):
Numpyarray_Final[i][k+1][j] = Numpyarray_Final[i][k][j]
for i,month in enumerate(months):
for j,station in enumerate(stationnames):
for z in range(1,len(timestamps)):
k = len(timestamps) - z
if np.isnan(Numpyarray_Final[i][k-1][j]):
Numpyarray_Final[i][k-1][j] = Numpyarray_Final[i][k][j]
pickle.dump(Numpyarray_Final,open(op_folderpath +'ALLDATA.pkl', 'wb' ),-1)
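# A compact sketch of the gap filling performed above on one (month, station)
# series, using a toy 1-D array (illustrative values only):
#
#     vals = np.array([np.nan, 1.0, np.nan, 3.0, np.nan])
#     # forward fill  -> [nan, 1.0, 1.0, 3.0, 3.0]
#     # backward fill -> [1.0, 1.0, 1.0, 3.0, 3.0]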
def main():
genData()
if __name__ == '__main__':
main()
| bsd-3-clause |
dreuven/SampleSparse | SampleSparse/tests/classicLAHMCSampling/PersonalPlotting.py | 3 | 9713 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
class PPlotting:
root_directory = None
def __init__(self, directory):
# try:
# str(directory)
# except:
# print("Cannot convert input to string. Put in a name!")
self.root_directory = str(directory)
def plot_a_matrix_mean(self,a_mean_matrix):
savepath = self.root_directory + "/a_mean_plot.png"
# print("SAVE PATH IS {0}".format(savepath))
plt.figure()
plt.title("A matrix mean")
plt.plot(a_mean_matrix)
plt.savefig(savepath)
plt.close()
def plot_a_activitation(self,a_coeff_matrix, i, num_receptive_fields, num_patches_from_image):
# num_receptive_fields = a_coeff_matrix.shape[0]
# num_patches_from_image = a_coeff_matrix.shape[1]
a_to_plot = a_coeff_matrix.reshape(num_receptive_fields, num_patches_from_image)[:,0]
plt.figure()
plt.title("Activity of a for an image patch graph")
plt.plot(a_to_plot)
savepath = self.root_directory + "/a_activity_iter:" + str(i) + ".png"
plt.savefig(savepath)
plt.close()
def plot_phis(self,phis, i):
plt.figure(figsize=(10,10))
k = phis.shape[1]
val_x_y = int(np.ceil(np.sqrt(k)))
size_of_patch = int(np.sqrt(phis.shape[0]))
plt.imshow(self.tile_raster_images(phis.T,(size_of_patch, size_of_patch), [val_x_y,val_x_y]), cmap = cm.Greys_r, interpolation="nearest")
savepath = self.root_directory +'/PhiPlots_iter:' + str(i) + '.png'
plt.title("Receptive fields")
plt.savefig(savepath, format='png', dpi=500)
plt.close()
def plot_energy_over_time(self,energy_values):
plt.figure()
plt.title("Energy Value")
plt.plot(energy_values)
savepath = self.root_directory +"/EnergyVals.png"
plt.savefig(savepath)
plt.close()
def plot_reconstruction_error_over_time(self,reconstruction_error_arr):
plt.figure()
plt.title("Reconstruction Error Over Time")
plt.plot(reconstruction_error_arr)
savepath = self.root_directory +"/ReconstructionError.png"
plt.savefig(savepath)
plt.close()
def plot_input_data(self,image_patch_data, i):
# Note: Assuming image_patch_data is p x N matrix
size = np.sqrt(image_patch_data.shape[0])
num_images = int(np.ceil(np.sqrt(image_patch_data.shape[1])))
im_arr = self.tile_raster_images(image_patch_data.T,[size ,size ],[num_images,num_images])
savePath = self.root_directory +"/input_data_iter_{0}.png".format(i)
plt.title("Input Data")
plt.imshow(im_arr, cmap=cm.Greys_r)
plt.savefig(savePath)
plt.close()
def plot_reconstructions(self,reconstruction,i):
# Note: assuming reconstruction is p x N matrix
size = np.sqrt(reconstruction.shape[0])
num_images = int(np.ceil(np.sqrt(reconstruction.shape[1])))
im_arr = self.tile_raster_images(reconstruction.T,[size ,size],[num_images,num_images])
savePath = self.root_directory +"/reconstructions_iter_{0}.png".format(i)
plt.title("Reconstruction Data")
plt.imshow(im_arr, cmap=cm.Greys_r)
plt.savefig(savePath)
plt.close()
def create_and_show_receptive_field_poster(self,receptive_fields, size_space_between, num_per_row, num_per_column, iteration_num):
num_receptive_fields = receptive_fields.shape[1]
# Making assumption that all receptive fields are square!
size_receptive = int(np.sqrt(receptive_fields.shape[0]))
if num_receptive_fields > num_per_row * num_per_column:
print("Impossible to fit all receptive fields onto this poster")
return
size_row_of_poster = num_per_row * size_receptive + (num_per_row - 1) * size_space_between
size_col_of_poster = num_per_column * size_receptive + (num_per_column - 1) * size_space_between
poster_image = np.zeros((size_row_of_poster, size_col_of_poster))
row_index = 0
col_index = 0
for r_field in range(num_receptive_fields):
curr_receptive_field = receptive_fields[:,r_field].reshape(size_receptive, size_receptive)
poster_image[row_index:row_index + size_receptive, col_index: col_index + size_receptive] = curr_receptive_field
col_index = col_index + size_receptive + size_space_between
if col_index - size_space_between == size_col_of_poster:
col_index = 0
row_index = row_index + size_receptive + size_space_between
plt.imshow(poster_image, cmap=cm.Greys_r)
savepath = self.root_directory +'/PhiPlots_iter:' + str(iteration_num) + '.png'
plt.title("Receptive fields")
plt.savefig(savepath)
plt.close()
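    # Worked example of the poster arithmetic above (illustrative numbers):
    # 4 receptive fields of 8x8 pixels laid out 2 per row and 2 per column
    # with a 2-pixel gap give a poster of 2*8 + (2-1)*2 = 18 pixels per side.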
def scale_to_unit_interval(self,ndar, eps=1e-8):
# """ Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(self,X, img_shape, tile_shape, tile_spacing=(2, 2),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
        which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
        # The expression below can be re-written in a more C-like style as
        # follows:
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
                    out_array[:, :, i] = self.tile_raster_images(X[i], img_shape, tile_shape, tile_spacing, scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = self.scale_to_unit_interval(X[tile_row * tile_shape[1] + tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
            return out_array
| gpl-3.0 |
yaroslavvb/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
MonoCloud/zipline | tests/history_cases.py | 7 | 21388 | """
Test case definitions for history tests.
"""
import pandas as pd
import numpy as np
from zipline.finance.trading import TradingEnvironment
from zipline.history.history import HistorySpec
from zipline.protocol import BarData
from zipline.utils.test_utils import to_utc
_cases_env = TradingEnvironment()
def mixed_frequency_expected_index(count, frequency):
"""
Helper for enumerating expected indices for test_mixed_frequency.
"""
minute = MIXED_FREQUENCY_MINUTES[count]
if frequency == '1d':
return [_cases_env.previous_open_and_close(minute)[1], minute]
elif frequency == '1m':
return [_cases_env.previous_market_minute(minute), minute]
def mixed_frequency_expected_data(count, frequency):
"""
Helper for enumerating expected data test_mixed_frequency.
"""
if frequency == '1d':
# First day of this test is July 3rd, which is a half day.
if count < 210:
return [np.nan, count]
else:
return [209, count]
elif frequency == '1m':
if count == 0:
return [np.nan, count]
else:
return [count - 1, count]
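# For orientation (the values follow directly from the two helpers above):
# at count == 0 both frequencies expect [np.nan, 0]; from count == 210 on,
# the '1d' entry becomes [209, count] because July 3rd, 2013 is a half
# session with 210 trading minutes.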
MIXED_FREQUENCY_MINUTES = _cases_env.market_minute_window(
to_utc('2013-07-03 9:31AM'), 600,
)
ONE_MINUTE_PRICE_ONLY_SPECS = [
HistorySpec(1, '1m', 'price', True, _cases_env, data_frequency='minute'),
]
DAILY_OPEN_CLOSE_SPECS = [
HistorySpec(3, '1d', 'open_price', False, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1d', 'close_price', False, _cases_env,
data_frequency='minute'),
]
ILLIQUID_PRICES_SPECS = [
HistorySpec(3, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(5, '1m', 'price', True, _cases_env, data_frequency='minute'),
]
MIXED_FREQUENCY_SPECS = [
HistorySpec(1, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(2, '1m', 'price', False, _cases_env, data_frequency='minute'),
HistorySpec(2, '1d', 'price', False, _cases_env, data_frequency='minute'),
]
MIXED_FIELDS_SPECS = [
HistorySpec(3, '1m', 'price', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'open_price', True, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1m', 'close_price', True, _cases_env,
data_frequency='minute'),
HistorySpec(3, '1m', 'high', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'low', True, _cases_env, data_frequency='minute'),
HistorySpec(3, '1m', 'volume', True, _cases_env, data_frequency='minute'),
]
HISTORY_CONTAINER_TEST_CASES = {
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
'test one minute price only': {
# A list of HistorySpec objects.
'specs': ONE_MINUTE_PRICE_ONLY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequency of updates to the container
'updates': [
BarData(
{
1: {
'price': 5,
'dt': to_utc('2013-06-21 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 6,
'dt': to_utc('2013-06-21 9:32AM'),
},
},
),
],
# Expected results
'expected': {
ONE_MINUTE_PRICE_ONLY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [5],
},
index=[
to_utc('2013-06-21 9:31AM'),
],
),
pd.DataFrame(
data={
1: [6],
},
index=[
to_utc('2013-06-21 9:32AM'),
],
),
],
},
},
'test daily open close': {
# A list of HistorySpec objects.
'specs': DAILY_OPEN_CLOSE_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'open_price': 10,
'close_price': 11,
'dt': to_utc('2013-06-21 10:00AM'),
},
},
),
BarData(
{
1: {
'open_price': 12,
'close_price': 13,
'dt': to_utc('2013-06-21 3:30PM'),
},
},
),
BarData(
{
1: {
'open_price': 14,
'close_price': 15,
# Wait a full market day before the next bar.
# We should end up with nans for Monday the 24th.
'dt': to_utc('2013-06-25 9:31AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
# open
DAILY_OPEN_CLOSE_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [10, np.nan, 14]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
# close
DAILY_OPEN_CLOSE_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 11]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 13]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [13, np.nan, 15]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
},
},
'test illiquid prices': {
# A list of HistorySpec objects.
'specs': ILLIQUID_PRICES_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 10,
'dt': to_utc('2013-06-28 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 11,
'dt': to_utc('2013-06-28 9:32AM'),
},
},
),
BarData(
{
1: {
'price': 12,
'dt': to_utc('2013-06-28 9:33AM'),
},
},
),
BarData(
{
1: {
'price': 13,
# Note: Skipping 9:34 to simulate illiquid bar/missing
# data.
'dt': to_utc('2013-06-28 9:35AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
ILLIQUID_PRICES_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [10, 11, 12],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
pd.DataFrame(
data={
1: [12, np.nan, 13],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
ILLIQUID_PRICES_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:57PM'),
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10, 11, 12],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
# The 12 value from 9:33 should be forward-filled.
pd.DataFrame(
data={
1: [10, 11, 12, 12, 13],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
},
},
'test mixed frequencies': {
# A list of HistorySpec objects.
'specs': MIXED_FREQUENCY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
'dt': to_utc('2013-07-03 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': count,
'dt': dt,
}
}
)
for count, dt in enumerate(MIXED_FREQUENCY_MINUTES)
],
# Dictionary mapping spec_key -> list of expected outputs.
'expected': {
MIXED_FREQUENCY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [count],
},
index=[minute],
)
for count, minute in enumerate(MIXED_FREQUENCY_MINUTES)
],
MIXED_FREQUENCY_SPECS[1].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1m'),
},
index=mixed_frequency_expected_index(count, '1m'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
],
MIXED_FREQUENCY_SPECS[2].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1d'),
},
index=mixed_frequency_expected_index(count, '1d'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
]
},
},
'test multiple fields and sids': {
# A list of HistorySpec objects.
'specs': MIXED_FIELDS_SPECS,
# Sids for the test.
'sids': [1, 10],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'dt': dt,
'price': count,
'open_price': count,
'close_price': count,
'high': count,
'low': count,
'volume': count,
},
10: {
'dt': dt,
'price': count * 10,
'open_price': count * 10,
'close_price': count * 10,
'high': count * 10,
'low': count * 10,
'volume': count * 10,
},
},
)
for count, dt in enumerate([
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
# NOTE: No update for 9:34
to_utc('2013-06-28 9:35AM'),
])
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': dict(
# Build a dict from a list of tuples. Doing it this way because
# there are two distinct cases we want to test: forward-fillable
# fields and non-forward-fillable fields.
[
(
# Non forward-fill fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, np.nan, 3],
10: [20, np.nan, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
# For volume, when we are missing data, we replace
                            # it with 0s to show that no trades occurred.
).fillna(0 if 'volume' in key else np.nan),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field not in HistorySpec.FORWARD_FILLABLE]
] +
# Concatenate the expected results for non-ffillable with
# expected result for ffillable.
[
(
# Forward-fillable fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, 2, 3],
10: [20, 20, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field in HistorySpec.FORWARD_FILLABLE]
]
),
},
}
| apache-2.0 |
antepsis/anteplahmacun | sympy/physics/quantum/tensorproduct.py | 23 | 13565 | """Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
"""Set flag controlling whether tensor products of states should be
printed as a combined bra/ket or as an explicit tensor product of different
bra/kets. This is a global setting for all TensorProduct class instances.
Parameters
----------
    combined : bool
        When true, tensor product states are combined into one ket/bra, and
        when false, explicit tensor product notation is used between each
        ket/bra.
"""
global _combined_printing
_combined_printing = combined
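# A minimal usage sketch (editorial addition, not part of the original module;
# assumes TensorProduct and Ket are re-exported from sympy.physics.quantum, as
# they are in current SymPy):
#
#     >>> from sympy.physics.quantum import TensorProduct, Ket
#     >>> from sympy.physics.quantum.tensorproduct import combined_tensor_printing
#     >>> tp = TensorProduct(Ket('a'), Ket('b'))
#     >>> combined_tensor_printing(True)   # pretty/latex printers now combine the kets
#     >>> combined_tensor_printing(False)  # back to explicit |a>x|b> notation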
class TensorProduct(Expr):
"""The tensor product of two or more arguments.
For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
or tensor product matrix. For other objects a symbolic ``TensorProduct``
instance is returned. The tensor product is a non-commutative
multiplication that is used primarily with operators and states in quantum
mechanics.
Currently, the tensor product distinguishes between commutative and non-
commutative arguments. Commutative arguments are assumed to be scalars and
are pulled out in front of the ``TensorProduct``. Non-commutative arguments
remain in the resulting ``TensorProduct``.
Parameters
==========
args : tuple
A sequence of the objects to take the tensor product of.
Examples
========
Start with a simple tensor product of sympy matrices::
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum import TensorProduct
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> TensorProduct(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> TensorProduct(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
We can also construct tensor products of non-commutative symbols:
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> tp = TensorProduct(A, B)
>>> tp
AxB
We can take the dagger of a tensor product (note the order does NOT reverse
like the dagger of a normal product):
>>> from sympy.physics.quantum import Dagger
>>> Dagger(tp)
Dagger(A)xDagger(B)
Expand can be used to distribute a tensor product across addition:
>>> C = Symbol('C',commutative=False)
>>> tp = TensorProduct(A+B,C)
>>> tp
(A + B)xC
>>> tp.expand(tensorproduct=True)
AxC + BxC
"""
is_commutative = False
def __new__(cls, *args):
if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
return matrix_tensor_product(*args)
c_part, new_args = cls.flatten(sympify(args))
c_part = Mul(*c_part)
if len(new_args) == 0:
return c_part
elif len(new_args) == 1:
return c_part * new_args[0]
else:
tp = Expr.__new__(cls, *new_args)
return c_part * tp
@classmethod
def flatten(cls, args):
# TODO: disallow nested TensorProducts.
c_part = []
nc_parts = []
for arg in args:
cp, ncp = arg.args_cnc()
c_part.extend(list(cp))
nc_parts.append(Mul._from_args(ncp))
return c_part, nc_parts
def _eval_adjoint(self):
return TensorProduct(*[Dagger(i) for i in self.args])
def _eval_rewrite(self, pattern, rule, **hints):
sargs = self.args
terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
return TensorProduct(*terms).expand(tensorproduct=True)
def _sympystr(self, printer, *args):
from sympy.printing.str import sstr
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + '('
s = s + sstr(self.args[i])
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + ')'
if i != length - 1:
s = s + 'x'
return s
def _pretty(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print('', *args)
length_i = len(self.args[i].args)
for j in range(length_i):
part_pform = printer._print(self.args[i].args[j], *args)
next_pform = prettyForm(*next_pform.right(part_pform))
if j != length_i - 1:
next_pform = prettyForm(*next_pform.right(', '))
if len(self.args[i].args) > 1:
next_pform = prettyForm(
*next_pform.parens(left='{', right='}'))
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
pform = prettyForm(*pform.right(',' + ' '))
pform = prettyForm(*pform.left(self.args[0].lbracket))
pform = prettyForm(*pform.right(self.args[0].rbracket))
return pform
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (Add, Mul)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u'\N{N-ARY CIRCLED TIMES OPERATOR}' + u' '))
else:
pform = prettyForm(*pform.right('x' + ' '))
return pform
def _latex(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
def _label_wrap(label, nlabels):
return label if nlabels == 1 else r"\left\{%s\right\}" % label
s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
len(arg.args)) for arg in self.args])
return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
self.args[0].rbracket_latex)
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\left('
            # The extra {} brackets are needed to get matplotlib's latex
            # renderer to render this properly.
s = s + '{' + printer._print(self.args[i], *args) + '}'
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\right)'
if i != length - 1:
s = s + '\\otimes '
return s
def doit(self, **hints):
return TensorProduct(*[item.doit(**hints) for item in self.args])
def _eval_expand_tensorproduct(self, **hints):
"""Distribute TensorProducts across addition."""
args = self.args
add_args = []
stop = False
for i in range(len(args)):
if isinstance(args[i], Add):
for aa in args[i].args:
tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
if isinstance(tp, TensorProduct):
tp = tp._eval_expand_tensorproduct()
add_args.append(tp)
break
if add_args:
return Add(*add_args)
else:
return self
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', None)
exp = tensor_product_simp(self)
if indices is None or len(indices) == 0:
return Mul(*[Tr(arg).doit() for arg in exp.args])
else:
return Mul(*[Tr(value).doit() if idx in indices else value
for idx, value in enumerate(exp.args)])
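    # Editorial sketch (not in the original source): the hook above is what
    # sympy.core.trace.Tr uses for full and partial traces of a tensor
    # product, assuming Tr delegates to _eval_trace via doit():
    #
    #     >>> from sympy import Symbol
    #     >>> from sympy.core.trace import Tr
    #     >>> A = Symbol('A', commutative=False)
    #     >>> B = Symbol('B', commutative=False)
    #     >>> Tr(TensorProduct(A, B)).doit()       # full trace: Tr(A)*Tr(B)
    #     >>> Tr(TensorProduct(A, B), [1]).doit()  # traces out only the second factor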
def tensor_product_simp_Mul(e):
"""Simplify a Mul with TensorProducts.
    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
    to a ``TensorProduct`` of ``Mul``s. It currently only works for relatively
    simple cases where the initial ``Mul`` only has scalars and raw
    ``TensorProduct``s, not ``Add``, ``Pow``, or ``Commutator``s of
    ``TensorProduct``s.
Parameters
==========
e : Expr
A ``Mul`` of ``TensorProduct``s to be simplified.
Returns
=======
e : Expr
A ``TensorProduct`` of ``Mul``s.
Examples
========
This is an example of the type of simplification that this function
performs::
>>> from sympy.physics.quantum.tensorproduct import \
tensor_product_simp_Mul, TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp_Mul(e)
(A*C)x(B*D)
"""
# TODO: This won't work with Muls that have other composites of
# TensorProducts, like an Add, Pow, Commutator, etc.
# TODO: This only works for the equivalent of single Qbit gates.
if not isinstance(e, Mul):
return e
c_part, nc_part = e.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return e
elif e.has(TensorProduct):
current = nc_part[0]
if not isinstance(current, TensorProduct):
raise TypeError('TensorProduct expected, got: %r' % current)
n_terms = len(current.args)
new_args = list(current.args)
for next in nc_part[1:]:
# TODO: check the hilbert spaces of next and current here.
if isinstance(next, TensorProduct):
if n_terms != len(next.args):
raise QuantumError(
'TensorProducts of different lengths: %r and %r' %
(current, next)
)
for i in range(len(new_args)):
new_args[i] = new_args[i] * next.args[i]
else:
# this won't quite work as we don't want next in the
# TensorProduct
for i in range(len(new_args)):
new_args[i] = new_args[i] * next
current = next
return Mul(*c_part) * TensorProduct(*new_args)
else:
return e
def tensor_product_simp(e, **hints):
"""Try to simplify and combine TensorProducts.
In general this will try to pull expressions inside of ``TensorProducts``.
    It currently only works for relatively simple cases where the products have
    only scalars and raw ``TensorProduct``s, not ``Add``, ``Pow``, or
    ``Commutator``s of ``TensorProduct``s. It is best to see what it does by
    showing examples.
Examples
========
>>> from sympy.physics.quantum import tensor_product_simp
>>> from sympy.physics.quantum import TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
First see what happens to products of tensor products:
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp(e)
(A*C)x(B*D)
    This is the core logic of this function, and it works inside powers, sums,
    commutators and anticommutators as well:
>>> tensor_product_simp(e**2)
(A*C)x(B*D)**2
"""
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return tensor_product_simp(e.base) ** e.exp
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
| bsd-3-clause |
xyguo/scikit-learn | examples/exercises/plot_cv_diabetes.py | 53 | 2861 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
scores = list()
scores_std = list()
n_folds = 3
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_val_score(lasso, X, y, cv=n_folds, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
scores, scores_std = np.array(scores), np.array(scores_std)
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
tetherless-world/setlr | setup.py | 1 | 1523 | import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "setlr",
version = "0.2.16",
author = "Jamie McCusker",
author_email = "mccusj@cs.rpi.edu",
description = ("setlr is a tool for Semantic Extraction, Transformation, and Loading."),
license = "Apache License 2.0",
keywords = "rdf semantic etl",
url = "http://packages.python.org/setlr",
packages=['setlr'],
long_description='''SETLr is a tool for generating RDF graphs, including named graphs, from almost any kind of tabular data.''',
include_package_data = True,
install_requires = [
'future',
'pip>=9.0.0',
'cython',
'numpy',
'rdflib',
'rdflib-jsonld',
'pandas>=0.23.0',
'requests',
'toposort',
'beautifulsoup4',
'jinja2',
'lxml',
'six',
'xlrd',
'ijson',
'requests-testadapter',
'python-slugify',
],
entry_points = {
'console_scripts': ['setlr=setlr:main'],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
)
| apache-2.0 |
Nyker510/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
plissonf/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/model_selection/_split.py | 21 | 57608 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
#         Olivier Grisel <olivier.grisel@ensta.org>,
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import inspect
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'LabelShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, labels):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, labels=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, labels)
"""
for test_index in self._iter_test_indices(X, y, labels):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, labels=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def _iter_test_indices(self, X, y=None, labels=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_folds=n_samples // p)`` which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
    number of samples, this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, labels=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold and StratifiedKFold"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
n_samples = _num_samples(X)
if self.n_folds > n_samples:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(self.n_folds,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, labels):
yield train, test
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_folds
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_folds=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_folds`` folds have size
``n_samples // n_folds + 1``, other folds have size
``n_samples // n_folds``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes label information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_folds, shuffle, random_state)
def _iter_test_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = (n_samples // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_samples % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
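    # Worked example of the fold sizing above (editorial comment, not part of
    # the scikit-learn source): with n_samples=10 and n_folds=3 we get
    # fold_sizes = [4, 3, 3], since 10 // 3 == 3 and 10 % 3 == 1 gives the
    # first fold one extra sample, matching the Notes in the class docstring.
    #
    #     >>> [test for _, test in KFold(n_folds=3).split(np.arange(10))][0]
    #     array([0, 1, 2, 3])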
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(n_folds=2)
>>> label_kfold.get_n_splits(X, y, labels)
2
>>> print(label_kfold)
LabelKFold(n_folds=2)
>>> for train_index, test_index in label_kfold.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_folds=3):
super(LabelKFold, self).__init__(n_folds, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if self.n_folds > n_labels:
raise ValueError("Cannot have number of folds n_folds=%d greater"
" than the number of labels: %d."
% (self.n_folds, n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
indices = label_to_fold[labels]
for f in range(self.n_folds):
yield np.where(indices == f)[0]
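    # Worked example of the greedy assignment above (editorial comment, not
    # part of the scikit-learn source): for labels [0, 0, 0, 1, 1, 2] and
    # n_folds=2 the per-label counts are [3, 2, 1]; label 0 fills fold 0,
    # labels 1 and 2 go to the lighter fold 1, and no label is split across
    # folds.
    #
    #     >>> X = np.zeros((6, 1))
    #     >>> cv = LabelKFold(n_folds=2)
    #     >>> [len(test) for _, test in cv.split(X, labels=[0, 0, 0, 1, 1, 2])]
    #     [3, 3]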
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_folds=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one holds
    the remaining samples.
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state)
def _make_test_folds(self, X, y=None, labels=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_folds)) as data to the KFold
per_cls_cvs = [
KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, labels=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_folds):
yield test_folds == i
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedKFold, self).split(X, y, labels)
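# Illustrative sketch (not scikit-learn API): a simplified picture of what
# ``_make_test_folds`` computes.  Every class gets its own sequence of
# test-fold indices, so each fold receives roughly the same class proportions
# as ``y``.  The real implementation uses per-class KFold splits (optionally
# shuffled); this round-robin version is a simplification for illustration.
def _demo_stratified_test_folds(y, n_folds=3):
    import numpy as np
    y = np.asarray(y)
    test_folds = np.zeros(len(y), dtype=int)
    for cls in np.unique(y):
        idx = np.where(y == cls)[0]
        # cycle fold indices 0..n_folds-1 within each class separately
        test_folds[idx] = np.arange(len(idx)) % n_folds
    return test_folds
# _demo_stratified_test_folds([0, 0, 0, 0, 1, 1, 1, 1], n_folds=2)
# -> array([0, 1, 0, 1, 0, 1, 0, 1]); each fold holds two samples per class.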
class LeaveOneLabelOut(BaseCrossValidator):
"""Leave One Label Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = LeaveOneLabelOut()
>>> lol.get_n_splits(X, y, labels)
2
>>> print(lol)
LeaveOneLabelOut()
>>> for train_index, test_index in lol.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
# We make a copy of labels to avoid side-effects during iteration
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
for i in unique_labels:
yield labels == i
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
"""Leave P Labels Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_labels : int
Number of labels (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = LeavePLabelOut(n_labels=2)
>>> lpl.get_n_splits(X, y, labels)
3
>>> print(lpl)
LeavePLabelOut(n_labels=2)
>>> for train_index, test_index in lpl.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_labels):
self.n_labels = n_labels
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
combi = combinations(range(len(unique_labels)), self.n_labels)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_labels[np.array(indices)]:
test_index[labels == l] = True
yield test_index
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
for train, test in self._iter_indices(X, y, labels):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, labels=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_iter
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(p=10)`` would be
``LabelShuffleSplit(test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
super(LabelShuffleSplit, self).__init__(
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
classes, label_indices = np.unique(labels, return_inverse=True)
for label_train, label_test in super(
LabelShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(label_indices, label_train))
test = np.flatnonzero(np.in1d(label_indices, label_test))
yield train, test
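# Illustrative sketch (not scikit-learn API): the two lines above turn a
# partition of *label indices* into a partition of *sample indices*.  The
# helper below isolates that inversion step; its name and the example data
# are assumptions made for illustration only.
def _demo_labels_to_sample_indices(labels, label_train, label_test):
    import numpy as np
    _, label_indices = np.unique(labels, return_inverse=True)
    train = np.flatnonzero(np.in1d(label_indices, label_train))
    test = np.flatnonzero(np.in1d(label_indices, label_test))
    return train, test
# With labels ['a', 'a', 'b', 'c'] and the label-level split ([0, 2], [1]),
# the sample-level split is (array([0, 1, 3]), array([2])).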
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_iter=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_iter, test_size, train_size, random_state)
def _iter_indices(self, X, y, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i,
np.round(n_test * p_i).astype(int))
for _ in range(self.n_iter):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) + len(test) < n_train + n_test:
                # We complete by randomly assigning the missing indices
missing_indices = np.where(bincount(train + test,
minlength=len(y)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedShuffleSplit, self).split(X, y, labels)
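# Illustrative sketch (not scikit-learn API): the core of ``_iter_indices``
# above is a proportional allocation, where each class contributes train
# samples in proportion to its frequency; rounding leftovers are then filled
# at random.  The helper below reproduces only the per-class bookkeeping; its
# name and the example numbers are assumptions made for illustration only.
def _demo_per_class_train_counts(class_counts, n_train):
    import numpy as np
    class_counts = np.asarray(class_counts, dtype=float)
    p_i = class_counts / class_counts.sum()      # class frequencies
    return np.round(n_train * p_i).astype(int)   # train samples per class
# _demo_per_class_train_counts([60, 30, 10], n_train=50) -> array([30, 15, 5])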
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is known
    only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with respect to the
size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
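# Illustrative usage sketch (not scikit-learn API): how float and int sizes
# resolve for n_samples=10.  The function only calls the helper defined above;
# the chosen numbers are arbitrary examples.
def _demo_validate_shuffle_split():
    """Return resolved (n_train, n_test) pairs for a few example inputs."""
    return (
        _validate_shuffle_split(10, test_size=0.2, train_size=None),  # (8, 2)
        _validate_shuffle_split(10, test_size=3, train_size=0.5),     # (5, 3)
        _validate_shuffle_split(10, test_size=None, train_size=7),    # (7, 3)
    )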
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
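# Illustrative usage sketch (not scikit-learn API): how the dispatch above
# behaves for the common cases; ``y_clf`` is an arbitrary example of a binary
# target, made up for illustration only.
def _demo_check_cv():
    import numpy as np
    y_clf = np.array([0, 1, 0, 1, 0, 1])
    stratified = check_cv(3, y_clf, classifier=True)    # -> StratifiedKFold(3)
    plain = check_cv(5, y_clf, classifier=False)        # -> KFold(5)
    wrapped = check_cv([([0, 1], [2]), ([2], [0, 1])])  # -> _CVIterableWrapper
    return stratified, plain, wrapped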
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
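# Illustrative usage sketch (not scikit-learn API): the ``stratify`` branch
# above simply swaps ShuffleSplit for StratifiedShuffleSplit, so passing the
# class labels keeps their proportions in both halves.  The data below is an
# arbitrary example made up for illustration only.
def _demo_stratified_train_test_split():
    import numpy as np
    X = np.arange(20).reshape(10, 2)
    y = np.array([0] * 8 + [1] * 2)          # imbalanced binary target
    X_tr, X_te, y_tr, y_te = train_test_split(
        X, y, test_size=0.5, stratify=y, random_state=0)
    # each half keeps the 8:2 ratio, i.e. 4 zeros and 1 one per half
    return y_tr, y_te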
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
not isinstance(estimator.kernel, GPKernel)):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[index] for index in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
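# Illustrative sketch (not scikit-learn API): for a precomputed square kernel
# the branch above selects rows by the evaluation indices and columns by the
# training indices, so K_subset[i, j] is the kernel value between evaluation
# sample i and training sample j.  The helper below isolates that indexing;
# its name is an assumption made for illustration only.
def _demo_kernel_subset(K, indices, train_indices=None):
    import numpy as np
    K = np.asarray(K)
    if train_indices is None:
        return K[np.ix_(indices, indices)]
    return K[np.ix_(indices, train_indices)]
# For a 4x4 kernel, _demo_kernel_subset(K, [3], train_indices=[0, 1]) returns
# the 1x2 block of similarities between sample 3 and training samples 0 and 1.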
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/user_interfaces/interactive2.py | 3 | 10375 | #!/usr/bin/env python
# GTK Interactive Console
# (C) 2003, Jon Anderson
# See www.python.org/2.2/license.html for
# license details.
#
import gtk
import gtk.gdk
import code
import os, sys
import pango
import __builtin__
import __main__
banner = """GTK Interactive Python Console
Thanks to Jon Anderson
%s
""" % sys.version
banner += """
Welcome to matplotlib.
help(matplotlib) -- some general information about matplotlib
help(plotting) -- shows a list of plot specific commands
"""
class Completer:
"""
Taken from rlcompleter, with readline references stripped, and a local dictionary to use.
"""
def __init__(self, locals):
self.locals = locals
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names
    currently defined in __main__ that match.
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,__builtin__.__dict__.keys(),__main__.__dict__.keys(), self.locals.keys()]:
for word in list:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
    completions. (For class instances, class members are also
considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
object = eval(expr, __main__.__dict__, self.locals)
words = dir(object)
if hasattr(object,'__class__'):
words.append('__class__')
words = words + get_class_members(object.__class__)
matches = []
n = len(attr)
for word in words:
if word[:n] == attr and word != "__builtins__":
matches.append("%s.%s" % (expr, word))
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
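# Illustrative sketch (not part of this example's API): the completion logic
# above boils down to prefix matching against keywords, builtins and the local
# namespace.  The helper below shows that core rule on an explicit namespace;
# its name and the sample namespace are assumptions made for illustration.
def _demo_prefix_matches(text, namespace):
    import keyword
    candidates = list(keyword.kwlist) + list(namespace)
    n = len(text)
    return [word for word in candidates
            if word[:n] == text and word != "__builtins__"]
# _demo_prefix_matches("wh", {"wheel_count": None}) -> ['while', 'wheel_count']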
class OutputStream:
"""
A Multiplexing output stream.
  It can replace another stream, and tee output to both the original stream and
  a GTK textview.
"""
def __init__(self,view,old_out,style):
self.view = view
self.buffer = view.get_buffer()
self.mark = self.buffer.create_mark("End",self.buffer.get_end_iter(), False )
self.out = old_out
self.style = style
self.tee = 1
def write(self,text):
if self.tee:
self.out.write(text)
end = self.buffer.get_end_iter()
if not self.view == None:
self.view.scroll_to_mark(self.mark, 0, True, 1, 1)
self.buffer.insert_with_tags(end,text,self.style)
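# Illustrative sketch (not part of this example's API): stripped of the GTK
# buffer handling, OutputStream is a tee that copies every write to several
# underlying streams.  The class below captures that idea on its own; the
# name `_DemoTee` is an assumption made for illustration only.
class _DemoTee(object):
    def __init__(self, *streams):
        self.streams = streams
    def write(self, text):
        for stream in self.streams:
            stream.write(text)
    def flush(self):
        for stream in self.streams:
            if hasattr(stream, "flush"):
                stream.flush()
# e.g. sys.stdout = _DemoTee(sys.__stdout__, open("console.log", "a"))
# duplicates everything printed to the terminal into a log file.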
class GTKInterpreterConsole(gtk.ScrolledWindow):
"""
An InteractiveConsole for GTK. It's an actual widget,
so it can be dropped in just about anywhere.
"""
def __init__(self):
gtk.ScrolledWindow.__init__(self)
self.set_policy (gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.text = gtk.TextView()
self.text.set_wrap_mode(True)
self.interpreter = code.InteractiveInterpreter()
self.completer = Completer(self.interpreter.locals)
self.buffer = []
self.history = []
self.banner = banner
self.ps1 = ">>> "
self.ps2 = "... "
self.text.add_events( gtk.gdk.KEY_PRESS_MASK )
self.text.connect( "key_press_event", self.key_pressed )
self.current_history = -1
self.mark = self.text.get_buffer().create_mark("End",self.text.get_buffer().get_end_iter(), False )
#setup colors
self.style_banner = gtk.TextTag("banner")
self.style_banner.set_property( "foreground", "saddle brown" )
self.style_ps1 = gtk.TextTag("ps1")
self.style_ps1.set_property( "foreground", "DarkOrchid4" )
self.style_ps1.set_property( "editable", False )
self.style_ps1.set_property("font", "courier" )
self.style_ps2 = gtk.TextTag("ps2")
self.style_ps2.set_property( "foreground", "DarkOliveGreen" )
self.style_ps2.set_property( "editable", False )
self.style_ps2.set_property("font", "courier" )
self.style_out = gtk.TextTag("stdout")
self.style_out.set_property( "foreground", "midnight blue" )
self.style_err = gtk.TextTag("stderr")
self.style_err.set_property( "style", pango.STYLE_ITALIC )
self.style_err.set_property( "foreground", "red" )
self.text.get_buffer().get_tag_table().add(self.style_banner)
self.text.get_buffer().get_tag_table().add(self.style_ps1)
self.text.get_buffer().get_tag_table().add(self.style_ps2)
self.text.get_buffer().get_tag_table().add(self.style_out)
self.text.get_buffer().get_tag_table().add(self.style_err)
self.stdout = OutputStream(self.text,sys.stdout,self.style_out)
self.stderr = OutputStream(self.text,sys.stderr,self.style_err)
sys.stderr = self.stderr
sys.stdout = self.stdout
self.current_prompt = None
self.write_line(self.banner, self.style_banner)
self.prompt_ps1()
self.add(self.text)
self.text.show()
def reset_history(self):
self.history = []
def reset_buffer(self):
self.buffer = []
def prompt_ps1(self):
self.current_prompt = self.prompt_ps1
self.write_line(self.ps1,self.style_ps1)
def prompt_ps2(self):
self.current_prompt = self.prompt_ps2
self.write_line(self.ps2,self.style_ps2)
def write_line(self,text,style=None):
start,end = self.text.get_buffer().get_bounds()
if style==None:
self.text.get_buffer().insert(end,text)
else:
self.text.get_buffer().insert_with_tags(end,text,style)
self.text.scroll_to_mark(self.mark, 0, True, 1, 1)
def push(self, line):
self.buffer.append(line)
if len(line) > 0:
self.history.append(line)
source = "\n".join(self.buffer)
more = self.interpreter.runsource(source, "<<console>>")
if not more:
self.reset_buffer()
return more
def key_pressed(self,widget,event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
return self.execute_line()
if event.keyval == gtk.gdk.keyval_from_name('Up'):
self.current_history = self.current_history - 1
if self.current_history < - len(self.history):
self.current_history = - len(self.history)
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name('Down'):
self.current_history = self.current_history + 1
if self.current_history > 0:
self.current_history = 0
return self.show_history()
elif event.keyval == gtk.gdk.keyval_from_name( 'Home'):
l = self.text.get_buffer().get_line_count() - 1
start = self.text.get_buffer().get_iter_at_line_offset(l,4)
self.text.get_buffer().place_cursor(start)
return True
elif event.keyval == gtk.gdk.keyval_from_name( 'space') and event.state & gtk.gdk.CONTROL_MASK:
return self.complete_line()
return False
def show_history(self):
if self.current_history == 0:
return True
else:
self.replace_line( self.history[self.current_history] )
return True
def current_line(self):
start,end = self.current_line_bounds()
return self.text.get_buffer().get_text(start,end, True)
def current_line_bounds(self):
txt_buffer = self.text.get_buffer()
l = txt_buffer.get_line_count() - 1
start = txt_buffer.get_iter_at_line(l)
if start.get_chars_in_line() >= 4:
start.forward_chars(4)
end = txt_buffer.get_end_iter()
return start,end
def replace_line(self,txt):
start,end = self.current_line_bounds()
self.text.get_buffer().delete(start,end)
self.write_line(txt)
def execute_line(self, line=None):
if line is None:
line = self.current_line()
self.write_line("\n")
else:
self.write_line(line + "\n")
more = self.push(line)
self.text.get_buffer().place_cursor(self.text.get_buffer().get_end_iter())
if more:
self.prompt_ps2()
else:
self.prompt_ps1()
self.current_history = 0
self.window.raise_()
return True
def complete_line(self):
line = self.current_line()
tokens = line.split()
token = tokens[-1]
completions = []
p = self.completer.complete(token,len(completions))
while p != None:
completions.append(p)
p = self.completer.complete(token, len(completions))
if len(completions) != 1:
self.write_line("\n")
self.write_line("\n".join(completions), self.style_ps1)
self.write_line("\n")
self.current_prompt()
self.write_line(line)
else:
i = line.rfind(token)
line = line[0:i] + completions[0]
self.replace_line(line)
return True
def main():
w = gtk.Window()
console = GTKInterpreterConsole()
console.set_size_request(640,480)
w.add(console)
def destroy(arg=None):
gtk.main_quit()
def key_event(widget,event):
if gtk.gdk.keyval_name( event.keyval) == 'd' and \
event.state & gtk.gdk.CONTROL_MASK:
destroy()
return False
w.connect("destroy", destroy)
w.add_events( gtk.gdk.KEY_PRESS_MASK )
w.connect( 'key_press_event', key_event)
w.show_all()
console.execute_line('import matplotlib')
console.execute_line("matplotlib.use('GTKAgg')")
console.execute_line('matplotlib.interactive(1)')
console.execute_line('from pylab import *')
if len(sys.argv)>1:
fname = sys.argv[1]
if not os.path.exists(fname):
print >> sys.stderr, '%s does not exist' % fname
for line in file(fname):
line = line.strip()
console.execute_line(line)
gtk.main()
if __name__ == '__main__':
main()
| gpl-2.0 |
hms-dbmi/clodius | clodius/tiles/bigbed.py | 1 | 10629 | import bbi
import functools as ft
import logging
import numpy as np
import pandas as pd
import random
import clodius.tiles.bigwig as hgbw
from concurrent.futures import ThreadPoolExecutor
DEFAULT_RANGE_MODE = "significant"
MIN_ELEMENTS = 1
MAX_ELEMENTS = 200
DEFAULT_SCORE = 0
logger = logging.getLogger(__name__)
range_modes = {}
range_modes["significant"] = {"name": "Significant", "value": "significant"}
def tileset_info(bbpath, chromsizes=None):
ti = hgbw.tileset_info(bbpath, chromsizes)
ti["range_modes"] = range_modes
return ti
def fetch_data(a):
(
bbpath,
binsize,
chromsizes,
range_mode,
min_elements,
max_elements,
cid,
start,
end,
) = a
"""
Retrieve tile data from a bigbed file.
This approach currently returns a subset of intervals within the bounds of the specified
query range.
The subset is determined, at this time, by using the population of scores in the score
column of the BED data to generate a quantile value that would allow, at most, a maximum
number of elements (either a default or specified value). Because intervals are discrete
elements, it is possible for a quantile to allow a few more elements than the desired
limit; in this case, a uniformly-random sample is drawn from the thresholded set without
replacement.
Parameters
----------
bbpath: string
The path to the bigBed media file
binsize: integer
Resolution of a bin at a particular zoom level
chromsizes: [[chrom, size],...]
A 2d array containing chromosome names and sizes. Overrides the
chromsizes in chromsizes_map
range_mode: string or None
If specified, determines what rule is applied to intervals retrieved
over the specified chromosome, start, and end range
min_elements: integer
For fetched intervals, return no fewer than the specified number
max_elements: integer
For fetched intervals, return no more than the specified number
cid: integer
Index of chromosome associated with chromsizes
start: integer
Start position of interval query (relative to chromosome)
end: integer
End position of interval query (relative to chromosome)
Returns
-------
intervals: [{'chrOffset': integer, 'importance': integer, 'fields': [interval]}, ... ]
A list of beddb-like gene annotation objects
"""
try:
chrom = chromsizes.index[cid]
fetch_factory = ft.partial(bbi.fetch_intervals, bbpath, chrom, start, end)
if range_mode == "significant":
intervals, intervals2 = fetch_factory(), fetch_factory()
else:
intervals, intervals2 = fetch_factory(), fetch_factory()
except IndexError:
# beyond the range of the available chromosomes
# probably means we've requested a range of absolute
# coordinates that stretch beyond the end of the genome
intervals, intervals2 = None, None
except KeyError:
# probably requested a chromosome that doesn't exist (e.g. chrM)
intervals, intervals2 = None, None
offset = 0
offsetIdx = 0
chrOffsets = {}
for chrSize in chromsizes:
chrOffsets[chromsizes.index[offsetIdx]] = offset
offset += chrSize
offsetIdx += 1
final_intervals = []
intervals_length = 0
scores = []
if not intervals:
return final_intervals
for interval in intervals:
try:
scores.append(int(interval[4]))
except (ValueError, IndexError):
scores.append(DEFAULT_SCORE)
intervals_length += 1
# generate beddb-like elements for parsing by the higlass plugin
if intervals_length >= min_elements and intervals_length <= max_elements:
for interval in intervals2:
try:
score = int(interval[4])
final_intervals.append(
{
"chrOffset": chrOffsets[chrom],
"importance": score,
"fields": interval,
}
)
except (ValueError, IndexError):
final_intervals.append(
{
"chrOffset": chrOffsets[chrom],
"importance": DEFAULT_SCORE,
"fields": interval,
}
)
elif intervals_length > max_elements:
thresholded_intervals = []
desired_perc = max_elements / intervals_length
thresholded_score = int(np.quantile(scores, 1 - desired_perc))
for interval in intervals2:
try:
score = int(interval[4])
if score >= thresholded_score:
thresholded_intervals.append(
{
"chrOffset": chrOffsets[chrom],
"importance": score,
"fields": interval,
}
)
except (ValueError, IndexError):
if DEFAULT_SCORE >= thresholded_score:
thresholded_intervals.append(
{
"chrOffset": chrOffsets[chrom],
"importance": DEFAULT_SCORE,
"fields": interval,
}
)
thresholded_intervals_length = len(thresholded_intervals)
if thresholded_intervals_length > max_elements:
indices = random.sample(range(thresholded_intervals_length), max_elements)
final_intervals = [thresholded_intervals[i] for i in sorted(indices)]
return final_intervals
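# Illustrative sketch (not part of clodius): the thinning rule above keeps at
# most `max_elements` intervals by retaining scores at or above a quantile
# threshold, then sampling uniformly if the threshold still lets a few extra
# through.  The helper below reproduces that rule on a bare list of scores;
# its name is an assumption made for illustration only.
def _demo_threshold_scores(scores, max_elements):
    if len(scores) <= max_elements:
        return list(range(len(scores)))
    desired_perc = max_elements / len(scores)
    cutoff = int(np.quantile(scores, 1 - desired_perc))
    kept = [i for i, s in enumerate(scores) if s >= cutoff]
    if len(kept) > max_elements:
        kept = sorted(random.sample(kept, max_elements))
    return kept
# e.g. _demo_threshold_scores(list(range(1000)), 200) keeps indices whose
# scores sit in roughly the top 20% of the distribution.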
def get_bigbed_tile(
bbpath,
zoom_level,
start_pos,
end_pos,
chromsizes=None,
range_mode=None,
min_elements=None,
max_elements=None,
):
if chromsizes is None:
chromsizes = hgbw.get_chromsizes(bbpath)
if min_elements is None:
min_elements = MIN_ELEMENTS
if max_elements is None:
max_elements = MAX_ELEMENTS
resolutions = hgbw.get_zoom_resolutions(chromsizes)
binsize = resolutions[zoom_level]
cids_starts_ends = list(hgbw.abs2genomic(chromsizes, start_pos, end_pos))
with ThreadPoolExecutor(max_workers=16) as e:
arrays = list(
e.map(
fetch_data,
[
tuple(
[
bbpath,
binsize,
chromsizes,
range_mode,
min_elements,
max_elements,
]
+ list(c)
)
for c in cids_starts_ends
],
)
)
# concatenate bigBed tileset data across chromosomes, so that it looks similar to a beddb response
results = [x for x in arrays if x != []]
return [item for sublist in results for item in sublist]
def tiles(bbpath, tile_ids, chromsizes_map={}, chromsizes=None):
"""
Generate tiles from a bigbed file.
Parameters
----------
    bbpath: string
        The path to the bigBed file that the tiles should be retrieved from
tile_ids: [str,...]
A list of tile_ids (e.g. xyx.0.0) identifying the tiles
to be retrieved
chromsizes_map: {uid: []}
A set of chromsizes listings corresponding to the parameters of the
tile_ids. To be used if a chromsizes id is passed in with the tile id
        via the `|cos:id` tag
chromsizes: [[chrom, size],...]
A 2d array containing chromosome names and sizes. Overrides the
chromsizes in chromsizes_map
Returns
-------
tile_list: [(tile_id, tile_data),...]
A list of tile_id, tile_data tuples
"""
min_elements = -1
max_elements = -1
generated_tiles = []
for tile_id in tile_ids:
tile_option_parts = tile_id.split("|")[1:]
tile_no_options = tile_id.split("|")[0]
tile_id_parts = tile_no_options.split(".")
tile_position = list(map(int, tile_id_parts[1:3]))
return_value = (
tile_id_parts[3] if len(tile_id_parts) > 3 else DEFAULT_RANGE_MODE
)
range_mode = return_value if return_value in range_modes else None
tile_options = dict([o.split(":") for o in tile_option_parts])
if "min" in tile_options:
min_elements = int(tile_options["min"])
if "max" in tile_options:
max_elements = int(tile_options["max"])
if min_elements > max_elements:
temp_min_elements = min_elements
min_elements = max_elements
max_elements = temp_min_elements
elif min_elements == max_elements:
min_elements = max_elements
max_elements = min_elements + 1
if max_elements <= 0:
min_elements = MIN_ELEMENTS
max_elements = MAX_ELEMENTS
if chromsizes:
chromnames = [c[0] for c in chromsizes]
chromlengths = [int(c[1]) for c in chromsizes]
chromsizes_to_use = pd.Series(chromlengths, index=chromnames)
else:
chromsizes_id = None
if "cos" in tile_options:
chromsizes_id = tile_options["cos"]
if chromsizes_id in chromsizes_map:
chromsizes_to_use = chromsizes_map[chromsizes_id]
else:
chromsizes_to_use = None
zoom_level = tile_position[0]
tile_pos = tile_position[1]
        # this doesn't combine multiple consecutive ids, which
# would speed things up
if chromsizes_to_use is None:
chromsizes_to_use = hgbw.get_chromsizes(bbpath)
max_depth = hgbw.get_quadtree_depth(chromsizes_to_use)
tile_size = hgbw.TILE_SIZE * 2 ** (max_depth - zoom_level)
start_pos = tile_pos * tile_size
end_pos = start_pos + tile_size
tile_value = get_bigbed_tile(
bbpath,
zoom_level,
start_pos,
end_pos,
chromsizes_to_use,
range_mode=range_mode,
min_elements=min_elements,
max_elements=max_elements,
)
generated_tiles += [(tile_id, tile_value)]
return generated_tiles
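# Illustrative sketch (not part of clodius): tile ids handled above look like
# "<uuid>.<zoom>.<pos>[.<range_mode>]" with optional "|key:value" options
# appended (min, max, cos).  The helper below shows just the parsing step; its
# name and the example id are assumptions made for illustration only.
def _demo_parse_tile_id(tile_id):
    tile_option_parts = tile_id.split("|")[1:]
    tile_no_options = tile_id.split("|")[0]
    tile_id_parts = tile_no_options.split(".")
    zoom_level, tile_pos = int(tile_id_parts[1]), int(tile_id_parts[2])
    options = dict(o.split(":") for o in tile_option_parts)
    return zoom_level, tile_pos, options
# _demo_parse_tile_id("abcd.4.7|min:10|max:150")
# -> (4, 7, {'min': '10', 'max': '150'})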
def chromsizes(filename):
return hgbw.chromsizes(filename)
| mit |
LeBarbouze/tunacell | tunacell/plotting/dynamics.py | 1 | 34918 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines plotting functions for the statistics of the dynamics.
"""
from __future__ import print_function
import os
import numpy as np
import collections
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import ticker
import matplotlib.gridspec as gridspec
from tunacell.filters.main import FilterSet
from tunacell.stats.single import Univariate, StationaryUnivariate
from tunacell.stats.two import StationaryBivariate
from tunacell.io import text
from .helpers import _set_axis_limits, _set_timelabel, _set_time_axis_ticks
# few variables that will be used through all functions
default_fontsize = mpl.rcParams['font.size']
default_lw = mpl.rcParams['lines.linewidth']
def _set_condition_list(univariate, show_cdts='master'):
"""Set the list of conditions to show
Parameters
----------
show_cdts : str or FilterSet or iterable on these (default 'master')
the conditions to plot, use 'all' for all conditions in univariate
univariate : Univariate instance
conditions will be matched against conditions stored in univariate
Returns
-------
list of FilterSet (conditions) to show
"""
conditions = ['master', ] # list of conditions to be plotted
if show_cdts == 'all':
conditions = ['master', ] + univariate.cset
elif show_cdts == 'master':
pass
elif isinstance(show_cdts, collections.Iterable):
for item in show_cdts:
_append_cdt(univariate, item, conditions)
else:
_append_cdt(univariate, show_cdts, conditions)
return conditions
def _append_cdt(univariate, this_cdt, cdt_list):
"""Append condition associated to this_cdt in univariate object to cdt_list
Parameters
----------
univariate : :class:`Univariate` instance
this_cdt : str or :class:`FilterSet` instance
either the condition instance or its string representation
cdt_list : list of conditions
list of conditions to append condition to
"""
found = False
if isinstance(this_cdt, str):
# find which
for cdt in univariate.cset:
# TODO : compare also cdt.label
if repr(cdt) == this_cdt:
found = True
break
elif isinstance(this_cdt, FilterSet):
for cdt in univariate.cset:
if repr(cdt) == repr(this_cdt):
found = True
break
if found:
cdt_list.append(cdt)
return
def plot_onepoint(univariate, show_cdts='all', show_ci=False,
mean_ref=None, var_ref=None,
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
counts_range=(None, None),
counts_fractional_pad=.1,
average_range=(None, None), # auto
average_fractional_pad=.1,
variance_range=(None, None),
variance_fractional_pad=.1,
show_legend=True,
show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, user_path=None, ext='.png',
verbose=False):
"""Plot one point statistics: counts, average, abd variance.
One point functions are plotted for each condition set up in *show_cdts*
argument: 'all' for all conditions, or the string representation (or label)
    of a particular condition (or a list thereof).
Parameters
----------
univariate : Univariate instance
show_cdts : str (default 'all')
must be either 'all', or 'master', or the repr of a condition, or a
list thereof
show_ci : bool {False, True}
whether to show 99% confidence interval
mean_ref : float
        reference mean value: what the user expects to see as the sample
        average, to compare with data
    var_ref : float
        reference variance value: what the user expects to see as the sample
        variance, to compare with data
axe_xsize : float (default 6)
size of the x-axis (inches)
axe_ysize : float (default 2.)
size if a single ax y-axis (inches)
time_range : couple of floats (default (None, None))
specifies (left, right) bounds
time_fractional_pad : float (default .1)
fraction of x-range to add as padding
counts_range : couple of floats (default (None, None))
specifies range for the Counts y-axis
    counts_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
    average_range : couple of floats (default (None, None))
        specifies range for the Average y-axis
    average_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
    variance_range : couple of floats (default (None, None))
        specifies range for the Variance y-axis
    variance_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
show_legend : bool {True, False}
print out legend
show_cdt_details_in_legend : bool {False, True}
show details about filters
use_obs_name : str (default None)
when filled, the plot title will use this observable name instead
of looking for the observable registered name
save : bool {False, True}
whether to save plot
user_path : str (default None)
user defined path where to save figure; default is canonical path
(encouraged)
ext : str {'.png', '.pdf'}
extension to be used when saving file
verbose : bool {False, True}
"""
if not isinstance(univariate, Univariate):
raise TypeError('Input is not {}'.format(Univariate))
fig, axs = plt.subplots(3, 1, figsize=(axe_xsize, 3*axe_ysize))
obs = univariate.obs
timelabel = _set_timelabel(obs) # define time label
main_handles = [] # main legend
ci_handles = [] # additional legend (TODO: check if necessary)
all_times = []
all_counts = []
all_average = []
all_variance = []
# build condition list
conditions = _set_condition_list(univariate, show_cdts)
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
alpha = 1
alpha_fill = .5
else:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
alpha = .8
alpha_fill = 0.3
ok = np.where(univariate[c_repr].count_one > 0)
times = univariate[c_repr].time[ok]
all_times.extend(times)
counts = univariate[c_repr].count_one[ok]
all_counts.extend(counts)
mean = univariate[c_repr].average[ok]
all_average.extend(mean)
var = univariate[c_repr].var[ok]
all_variance.extend(var)
std = univariate[c_repr].std[ok]
se = 2.58 * std / np.sqrt(counts) # standard error 99% CI Gaussian
# var = np.diagonal(univariate[c_repr].autocorr)
line_counts, = axs[0].plot(times, counts, alpha=alpha, lw=lw,
label='{}'.format(c_label))
main_handles.append(line_counts)
color = line_counts.get_color()
average, = axs[1].plot(times, mean, color=color, alpha=0.8, lw=lw, label=c_label)
if show_ci:
fill_std = axs[1].fill_between(times, mean-se, mean+se,
facecolor=color, alpha=alpha_fill)
ci_handles.append(fill_std)
all_average.extend(mean-se)
all_average.extend(mean+se)
variance, = axs[2].plot(times, var, color=color, alpha=0.8, lw=lw, label=c_label)
# adding reference lines
if mean_ref is not None:
mref = axs[1].axhline(mean_ref, ls='-.', color='C7', alpha=.7,
label='reference value')
main_handles.append(mref)
all_average.append(mean_ref)
if var_ref is not None:
vref = axs[2].axhline(var_ref, ls='-.', color='C7', alpha=.7,
label='reference value')
        # check last label if mean_ref has been saved
last_lab = main_handles[-1].get_label()
if last_lab != vref.get_label():
main_handles.append(vref)
all_variance.append(var_ref)
# print vertical line at tref
if obs.timing != 'g' and isinstance(obs.tref, float):
for ax in axs:
vtref = ax.axvline(univariate.obs.tref, color='C7', ls='--',
alpha=.5, label='reference time in obs')
main_handles.append(vtref) # only the last one
# ## limits and ticks ##
# xaxis
for ax in axs:
left, right = _set_axis_limits(ax, all_times, which='x', pad=time_fractional_pad,
force_range=time_range)
# locator
locator = _set_time_axis_ticks(axs[0], obs, bounds=(left, right))
for ax in axs:
ax.xaxis.set_major_locator(locator)
# yaxis limits
_set_axis_limits(axs[0], all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
axs[0].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
# average
_set_axis_limits(axs[1], all_average, which='y', pad=average_fractional_pad,
force_range=average_range)
axs[1].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# variance
_set_axis_limits(axs[2], all_variance, which='y', pad=variance_fractional_pad,
force_range=variance_range)
axs[2].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# tick formatter
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
for ax in axs:
ax.yaxis.set_major_formatter(formatter)
t = ax.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax.text(0, .95, msg, ha='left', va='top', transform=ax.transAxes)
t.set_visible(False)
axs[0].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[1].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[2].set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
for ax in axs[:2]:
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='x', colors='C7')
for ax in axs[1:]:
ax.spines['top'].set_color('C7')
axs[0].set_ylabel('Counts', fontsize='medium')
axs[1].set_ylabel('Average', fontsize='medium')
axs[2].set_ylabel('Variance', fontsize='medium')
# ## legend ##
# C.I.
if ci_handles:
ci = ci_handles[0]
# ci.set_color('C7')
ci.set_label('.99 C.I.')
main_handles.append(ci)
handles = main_handles[:]
labels = [h.get_label() for h in handles]
if show_legend:
axs[-1].legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.5/axe_ysize))
# title
latex_obs = obs.latexify(use_name=use_obs_name)
axs[0].text(0.5, 1+.2/axe_ysize,
r'{}'.format(latex_obs),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=axs[0].transAxes)
fig.subplots_adjust(hspace=0)
if save:
univ = univariate
try:
obs_path = univ._get_obs_path(user_root=user_path, write=False)
except text.MissingFolderError:
# it means data has not been written yet
# export data and then get
univ.export_text(analysis_folder=user_path)
obs_path = univ._get_obs_path(user_root=user_path, write=False)
bname = 'plot_onepoint_' + univ.obs.name + '_' + univ.region.name + ext
fname = os.path.join(obs_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
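# Minimal usage sketch for plot_onepoint (illustrative only): the
# ``univariate`` object below is assumed to be an already-computed
# Univariate instance obtained from an analysis pipeline not shown here;
# parameter values are arbitrary.
#
#     fig = plot_onepoint(univariate, show_ci=True, mean_ref=1.0,
#                         save=False, verbose=True)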
def plot_twopoints(univariate, condition_label=None, trefs=[], ntrefs=4,
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
counts_range=(None, None),
counts_fractional_pad=.1,
corr_range=(None, None), # auto
corr_fractional_pad=.1,
delta_t_max=None,
show_exp_decay=None,
show_legend=True,
show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, ext='.png', verbose=False):
"""Plot two-point functions: counts and autocorrelation functions.
These plots are able to show only one extra condition with 'master', and
are plotted for a set of time of references.
Parameters
----------
univariate : :class:`Univariate` instance
condition_label : str (default None)
must be the repr of a given FilterSet
trefs : list of floats
indicate the times that you would like to have as references
if left empty, reference times will be computed automatically
ntrefs : int
if trefs is empty, number of times of reference to display
axe_xsize : float (default 6)
size of the x-axis (inches)
axe_ysize : float (default 2.)
size of a single ax's y-axis (inches)
time_range : couple of floats (default (None, None))
specifies (left, right) bounds
time_fractional_pad : float (default .1)
fraction of x-range to add as padding
counts_range : couple of floats (default (None, None))
specifies range for the Counts y-axis
counts_fractional_pad : float (default .1)
fractional amount of y-range to add as padding
corr_range : couple of floats (default (None, None))
specifies range for the correlation y-axis
corr_fractional_pad : float (default .1)
fractional amount of y-range to add as padding
delta_t_max : float (default None)
when given, bottom plot will be using this max range symmetrically;
otherwise, will use the largest intervals found in data (often too
large to see something)
show_exp_decay : float (default None)
when a floating point number is passed, a light exponential decay
curve is plotted for each tref
show_legend : bool {True, False}
print out legend
show_cdt_details_in_legend : bool {False, True}
show details about filters
use_obs_name : str (default None)
when provided, the plot title uses this name instead of the
observable's registered name
save : bool {False, True}
whether to save figure at canonical path
ext : str {'.png', '.pdf'}
extension to be used when saving figure
verbose : bool {False, True}
when True, print the path where the figure is saved
"""
obs = univariate.obs
timelabel = _set_timelabel(obs) # define time label
# get period from eval times
if len(univariate.eval_times) > 0:
period = univariate.eval_times[1] - univariate.eval_times[0]
# or from experiment metadata
else:
period = univariate.exp.period
fig, axs = plt.subplots(3, 1, figsize=(axe_xsize, 3*axe_ysize))
# choice of index/indices for time of reference
times = univariate['master'].time
npoints = len(times)
if not trefs:
logging.info('Determining trefs...')
di = npoints // ntrefs + 1
indices = np.arange(0, npoints, di, dtype=int)
trefs = times[indices]
logging.info(trefs)
all_times = []
all_counts = []
all_corr = []
handles = []
# prep work for latex printing
latex_ref = '{{\mathrm{{ref}}}}'
if obs.timing == 'g':
prefix = 'g'
units = ''
else:
prefix = 't'
units = 'mins'
conditions = ['master', ] + univariate.cset
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
lt = '-'
alpha = .8
elif cdt.label == condition_label or str(cdt) == condition_label or repr(cdt) == condition_label:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
lt = '--'
alpha = .6
# we plot master and one condition if given, not more...
else:
continue
times = univariate[c_repr].time
counts = univariate[c_repr].count_two
corr = univariate[c_repr].autocorr
var = np.diagonal(corr)
valid = counts != 0
for tref in trefs:
# this tref may not be in conditioned data (who knows)
if np.amin(np.abs(times - tref)) > period:
continue
index = np.argmin(np.abs(times - tref))
if obs.timing == 'g':
lab = '{:d}'.format(tref)
else:
lab = '{:.0f}'.format(tref)
line_label = r'$ {}_{} = {}$ {} ({})'.format(prefix, latex_ref, lab, units, c_label)
ok = np.where(counts[index, :] > 0)
# if len(ok[0]) == 0:
# continue
# time limits
all_times.extend(times[ok])
dat, = axs[0].plot(times[ok], counts[index, :][ok],
ls=lt, lw=lw, alpha=alpha, label=line_label)
handles.append(dat)
all_counts.extend(counts[index, :][ok])
color = dat.get_color()
axs[0].plot((tref, tref), (0, counts[index, index]),
ls=':', color=color)
axs[1].axhline(0, ls='-', color='C7', alpha=.3) # thin line at 0
dat, = axs[1].plot(times[valid[index, :]],
corr[index, :][valid[index, :]]/var[index],
ls=lt, lw=lw, alpha=alpha)
all_corr.extend(corr[index, :][valid[index, :]]/var[index])
color = dat.get_color()
axs[1].axvline(tref, ymin=0.1, ymax=0.9, ls=':', color=color)
axs[2].axhline(0, ls='-', color='C7', alpha=.3) # thin line at 0
axs[2].plot(times[valid[index, :]] - tref,
corr[index, :][valid[index, :]]/var[index], ls=lt, lw=lw, alpha=alpha)
# ## limits and ticks ##
# xaxis
for ax in axs[:2]:
left, right = _set_axis_limits(ax, all_times, which='x',
pad=time_fractional_pad,
force_range=time_range)
hrange = right - left
ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
# bottom plot : try to zoom over provided range
if delta_t_max is not None:
axs[2].set_xlim(left=-delta_t_max, right=delta_t_max)
# if not provided, compute automatic ranges (not pretty usually)
else:
axs[2].set_xlim(left=-hrange, right=hrange)
axs[2].xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
# add exponential decay
if show_exp_decay is not None:
tt = np.linspace(left, right, 100)
dd = np.linspace(-hrange, hrange, 100)
lab = r'$t_{{\mathrm{{decay}}}} = {:.1f}$ {}'.format(1./show_exp_decay, units)
for tref in trefs:
axs[1].plot(tt, np.exp(-show_exp_decay * np.abs(tt - tref)),
ls='-.', color='C7', alpha=.7)
dec, = axs[2].plot(dd, np.exp(-show_exp_decay * np.abs(dd)),
ls='-.', color='C7', alpha=.7, label=lab)
all_corr.extend(np.exp(-show_exp_decay * np.abs(dd)))
handles.append(dec)
# ## yaxis limits ##
# counts
_set_axis_limits(axs[0], all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
axs[0].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
# corr
for ax in axs[1:]:
_set_axis_limits(ax, all_corr, which='y', pad=corr_fractional_pad,
force_range=corr_range)
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# legend
labels = [h.get_label() for h in handles]
axs[-1].legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.5/axe_ysize), labelspacing=0.2) # reduce labelspacing because of LaTeX
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
for ax in axs:
ax.yaxis.set_major_formatter(formatter)
t = ax.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax.text(0, .95, msg, ha='left', va='top', transform=ax.transAxes)
t.set_visible(False)
axs[0].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[1].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[2].set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
for ax in axs[:1]:
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='x', colors='C7')
for ax in axs[1:2]:
ax.spines['top'].set_color('C7')
# ylabels
axs[0].set_ylabel(r'# $\langle t_{\mathrm{ref}} | t \rangle$',
fontsize='medium')
axs[1].set_ylabel(r'$a(t_{\mathrm{ref}}, t)$',
fontsize='medium')
axs[2].set_ylabel(r'$a(t_{\mathrm{ref}}, t- t_{\mathrm{ref}})$',
fontsize='medium')
# title
latex_obs = obs.latexify(use_name=use_obs_name)
axs[0].text(0.5, 1+.2/axe_ysize,
r'{}'.format(latex_obs),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=axs[0].transAxes)
fig.subplots_adjust(hspace=0.)
# save fig at canonical path
if save:
# export data files if not existing yet
try:
obs_path = univariate._get_obs_path(write=False)
except text.MissingFolderError:
univariate.write_text()
if condition_label is None:
univc = univariate.master
else:
univc = univariate[condition_label]
cdt_path = univc._get_path()
bname = 'plot_twopoints_' + obs.name + '_' + univariate.region.name + ext
fname = os.path.join(cdt_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
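# Illustrative sketch (not part of the library code): plotting two-point
# functions for a hypothetical, already-computed ``univariate`` instance,
# restricted to explicit reference times; parameter values are arbitrary.
#
#     fig = plot_twopoints(univariate, trefs=[60., 120., 180.],
#                          show_exp_decay=0.02, save=False)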
def plot_stationary(stationary, show_cdts='all',
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
time_guides=[0., ],
counts_range=(None, None),
counts_fractional_pad=.1,
corr_range=(None, None), # auto
counts_logscale=False,
corr_fractional_pad=.1,
corr_logscale=False,
corr_guides=[0., ],
show_exp_decay=None,
show_legend=True, show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, ext='.png', verbose=False):
"""Plot stationary autocorrelation.
Parameters
----------
stationary : StationaryUnivariate or StationaryBivariate instance
axe_xsize : float (default 6)
size (in inches) of the x-axis
axe_ysize : float (default 2)
size (in inches) of the individual y-axis
time_range : couple of floats
bounds for time (x-axis)
time_fractional_pad : float
fractional padding for x-axis
time_guides : list of float (default [0.])
values at which to plot vertical grey guide lines
counts_range : couple of ints
bounds for counts axis
counts_fractional_pad : float
fractional padding for counts axis
corr_range : couple of floats
bounds for correlation values
counts_logscale : bool {False, True}
use logscale for counts axis
corr_fractional_pad : float
fractional padding for correlation values
corr_logscale : bool {False, True}
use logscale for correlation values (symlog is used to display
symmetrically negative values)
corr_guides : list of float (default [0.])
values at which to plot horizontal grey guide lines
show_exp_decay : float (default None)
when a decay rate is given, plot the exponential decay
exp(-rate * t) as a reference curve
save : bool {False, True}
whether to save plot at canonical path
use_obs_name : str (default None)
when provided, the plot title uses this name instead of the
observable's registered name
ext : str {'.png', '.pdf'}
extension used for file
Returns
-------
fig : Figure instance
"""
if not (isinstance(stationary, StationaryUnivariate) or
isinstance(stationary, StationaryBivariate)):
msg = ('Input is not an instance of '
'{}'.format(StationaryUnivariate) + ' or of '
'{}'.format(StationaryBivariate))
raise TypeError(msg)
if isinstance(stationary, StationaryUnivariate):
obs = stationary.obs
timelabel = _set_timelabel(obs, use_tref=False)
elif isinstance(stationary, StationaryBivariate):
obs = [uni.obs for uni in stationary.univariates]
timelabel = _set_timelabel(obs[0], use_tref=False)
if 'minutes' in timelabel:
units = 'mins'
prefix = 't'
else:
units = '' # generations are used
prefix = 'g'
timelabel = r'$\Delta$'+timelabel
nplots = 2
fig = plt.figure(figsize=(axe_xsize, (nplots + 1)*axe_ysize))
gs = gridspec.GridSpec(nplots + 1, 1)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1:])
# build condition list
if isinstance(stationary, StationaryUnivariate):
conditions = _set_condition_list(stationary.univariate, show_cdts=show_cdts)
elif isinstance(stationary, StationaryBivariate):
conditions = []
conditions_0 = _set_condition_list(stationary.univariates[0], show_cdts=show_cdts)
conditions_1 = _set_condition_list(stationary.univariates[1], show_cdts=show_cdts)
# intersect
for cdt in conditions_0:
if cdt in conditions_1:
conditions.append(cdt)
all_times = []
all_counts = []
all_corrs = []
main_handles = [] # for legend
ci_handles = []
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
alpha = 1
alpha_fill = .5
else:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
alpha = .8
alpha_fill = 0.3
array = stationary[c_repr].array
nonzero = np.where(array['counts'] > 1) # 1 sample does not have std
dts = array['time_interval'][nonzero]
all_times.extend(dts)
counts = array['counts'][nonzero]
all_counts.extend(counts)
if isinstance(stationary, StationaryUnivariate):
corr = array['auto_correlation'][nonzero]
else:
corr = array['cross_correlation'][nonzero]
try:
dev = array['std_dev'][nonzero]
except ValueError:
dev = None
# counts
label = '{}'.format(c_label)
line, = ax1.plot(dts, counts, lw=lw, alpha=alpha, label=label)
main_handles.append(line)
col = line.get_color() # useful for later stages
# autocorrelation: divide by variance
if isinstance(stationary, StationaryUnivariate):
norm = corr[0]
# cross-correlation: divide covariance by product of standard devs
elif isinstance(stationary, StationaryBivariate):
prod = 1.
for single in stationary.univariates:
prod *= np.sqrt(single[c_repr].stationary.autocorr[0])
norm = prod
dat, = ax2.plot(dts, corr/norm, color=col,
lw=lw, alpha=alpha, label=label)
all_corrs.extend(corr/norm)
if dev is not None:
se = 2.58 * dev / np.sqrt(counts)
ci = ax2.fill_between(dts, (corr-se)/norm, (corr+se)/norm,
facecolor=col, alpha=alpha_fill,
label='.99 C.I.')
ci_handles.append(ci)
all_corrs.extend((corr-se)/norm)
all_corrs.extend((corr+se)/norm)
# vertical lines for timing
for val in time_guides:
ax2.axvline(val, ls=':', color='C7', alpha=.5)
# horizontal lines for correlation ref
for val in corr_guides:
ax2.axhline(val, ls=':', color='C7', alpha=.5)
# ## limits and ticks ##
# xaxis
for ax in [ax1, ax2]:
left, right = _set_axis_limits(ax, all_times, which='x',
pad=time_fractional_pad,
force_range=time_range)
ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
if show_exp_decay is not None:
tt = np.linspace(left, right, 100)
yy = np.exp(-show_exp_decay*np.abs(tt))
lab = r'${}_{{\mathrm{{decay}}}} = {:.1f}$ {}'.format(prefix, 1./show_exp_decay, units)
ref, = ax2.plot(tt, yy, '-.', color='C7', alpha=1,
label=lab)
main_handles.append(ref)
# ## yaxis limits ##
# counts
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
if not counts_logscale:
_set_axis_limits(ax1, all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
ax1.yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
ax1.yaxis.set_major_formatter(formatter)
t = ax1.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax1.text(0, .95, msg, ha='left', va='top', transform=ax1.transAxes)
t.set_visible(False)
else:
ax1.set_yscale('symlog', linthresh=1)
# corr
if not corr_logscale:
bottom, top = _set_axis_limits(ax2, all_corrs, which='y',
pad=corr_fractional_pad,
force_range=corr_range)
if top > 2 or bottom < -2:
locator = ticker.MaxNLocator(nbins=5, integer=True)
else:
locator = ticker.FixedLocator([-1, -.5, 0., .5, 1])
ax2.yaxis.set_major_locator(locator)
ax2.yaxis.set_major_formatter(formatter)
t = ax2.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax2.text(0, .95, msg, ha='left', va='top', transform=ax2.transAxes)
t.set_visible(False)
else:
ax2.set_yscale('symlog', linthreshy=0.1, linscaley=0.2,
subsy=[2, 3, 4, 5, 6, 7, 8, 9])
if corr_range[0] is not None and corr_range[0] > 0.:
ax2.set_ylim(bottom=corr_range[0])
ax1.tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
ax2.set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
ax1.spines['bottom'].set_visible(False)
ax1.tick_params(axis='x', colors='C7')
ax2.spines['top'].set_color('C7')
# ylabels
ax1.set_ylabel(r'Counts', fontsize='medium')
if isinstance(stationary, StationaryUnivariate):
ax2.set_ylabel(r'$\tilde{{a}}(\Delta {})$'.format(prefix), fontsize='medium')
elif isinstance(stationary, StationaryBivariate):
ax2.set_ylabel(r'$\tilde{{c}}(\Delta {})$'.format(prefix), fontsize='medium')
# writing observable
# case: obs is a single observable
if isinstance(stationary, StationaryUnivariate):
msg = '{}:{}'.format(obs.latexify(shorten_time_variable=True, use_name=use_obs_name),
obs.latexify(plus_delta=True, shorten_time_variable=True, use_name=use_obs_name))
# case: obs is a couple of observables
else:
if use_obs_name is not None:
if isinstance(use_obs_name, str):
use_name_0 = use_obs_name
use_name_1 = None
else:
if len(use_obs_name) == 1:
use_name_0 = use_obs_name[0]
use_name_1 = None
else:
use_name_0 = use_obs_name[0]
use_name_1 = use_obs_name[1]
else:
use_name_0 = None
use_name_1 = None
msg = '{}:{}'.format(obs[0].latexify(shorten_time_variable=True,
use_name=use_name_0),
obs[1].latexify(plus_delta=True, shorten_time_variable=True,
use_name=use_name_1))
ax1.text(0.5, 1+.2/axe_ysize, r'{}'.format(msg),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=ax1.transAxes)
# ## legend ##
# C.I.
if ci_handles:
ci = ci_handles[0]
# ci.set_color('C7')
ci.set_label('.99 C.I.')
main_handles.append(ci)
handles = main_handles[:]
labels = [h.get_label() for h in handles]
if show_legend:
ax2.legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.25/axe_ysize), labelspacing=.2)
fig.subplots_adjust(hspace=0)
if save:
# get univariate instance to get path where to save figure
bname = 'plot_stationary_'
try:
obs_path = stationary._get_obs_path(write=False)
except text.MissingFolderError:
stationary.write_text()
obs_path = stationary._get_obs_path(write=False)
obsname = os.path.basename(obs_path)
bname += obsname + '_'
bname += stationary.region.name + ext
fname = os.path.join(obs_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
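# Illustrative sketch (assumes an already-computed StationaryUnivariate
# instance named ``stationary``): compare the stationary autocorrelation
# against an exponential decay of rate 0.05; values are arbitrary.
#
#     fig = plot_stationary(stationary, show_exp_decay=0.05,
#                           corr_logscale=False, save=False)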
| mit |
trankmichael/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
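# For reference, a minimal (non-test) usage sketch of SpectralCoclustering
# on synthetic data, mirroring the calls exercised in the test above; the
# parameter values are illustrative only:
#
#     S, rows, cols = make_biclusters((30, 30), 3, noise=0.5, random_state=0)
#     model = SpectralCoclustering(n_clusters=3, random_state=0).fit(S)
#     score = consensus_score(model.biclusters_, (rows, cols))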
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
ningchi/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 26 | 1523 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor boosted 299 times (300 decision trees in total) is compared
with a single decision tree regressor. As the number of boosts increases,
the regressor can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=4)
clf_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
y_1 = clf_1.predict(X)
y_2 = clf_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
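# Optional follow-up (illustrative, not part of the original example): the
# boosted regressor exposes staged_predict, which yields predictions after
# each boosting iteration and can be used to track how the fit evolves.
#
#     for i, y_stage in enumerate(clf_2.staged_predict(X)):
#         if i % 100 == 0:
#             print(i, np.mean((y_stage - y) ** 2))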
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/tree/tree.py | 23 | 40423 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indices of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# np.reshape is used (rather than y[:, np.newaxis]) because it
# preserves the data contiguity.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be an integer >= 2 "
"or a float in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be an integer >= 1 "
"or a float in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must be in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Return the index of the leaf that each sample ends up in.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix in which non-zero elements
indicate that the sample goes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
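# Illustrative usage sketch (not part of the module): a typical classifier
# workflow with probability estimates; the iris dataset is only an example.
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     clf = DecisionTreeClassifier(max_depth=2, random_state=0)
#     clf.fit(iris.data, iris.target)
#     print(clf.predict_proba(iris.data[:2]))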
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
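# The class docstrings above warn that extra-trees should only be used within
# ensemble methods. A minimal, hedged sketch of that pattern (illustrative only,
# not part of the original module; it assumes the scikit-learn API of this era):
#
#     from sklearn.datasets import load_boston
#     from sklearn.ensemble import BaggingRegressor
#     from sklearn.tree import ExtraTreeRegressor
#
#     boston = load_boston()
#     base = ExtraTreeRegressor(random_state=0)
#     ensemble = BaggingRegressor(base_estimator=base, n_estimators=10,
#                                 random_state=0)
#     ensemble.fit(boston.data, boston.target)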
| bsd-3-clause |
kjung/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points, removing them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
    unlabeled_indices = np.delete(unlabeled_indices,
                                  delete_indices.astype(int))
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/contour.py | 3 | 69667 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib._contour as _contour
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
import matplotlib.mathtext as mathtext
import matplotlib.patches as mpatches
import matplotlib.texmanager as texmanager
import matplotlib.transforms as mtrans
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
"""
    Unlike ordinary text, get_rotation returns an updated angle in
    pixel coordinates, assuming that the input rotation is an angle in
    data coordinates (or whatever transform is set).
"""
def get_rotation(self):
angle = text.Text.get_rotation(self)
trans = self.get_transform()
x, y = self.get_position()
new_angles = trans.transform_angles(np.array([angle]),
np.array([[x, y]]))
return new_angles[0]
class ContourLabeler(object):
"""Mixin to provide labelling capability to ContourSet"""
def clabel(self, *args, **kwargs):
"""
Label a contour plot.
Call signature::
clabel(cs, **kwargs)
Adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
size in points or relative size e.g., 'smaller', 'x-large'
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g., *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string), or it can be any callable, such
as a :class:`~matplotlib.ticker.Formatter` instance, that
returns a string when called with a numeric contour level.
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
*manual* can be an iterable object of x,y tuples. Contour labels
            will be created as if the mouse were clicked at each x,y position.
*rightside_up*:
if *True* (default), label rotations will always be plus
or minus 90 degrees from level.
*use_clabeltext*:
if *True* (default is False), ClabelText class (instead of
matplotlib.Text) is used to create labels. ClabelText
            recalculates rotation angles of texts during drawing time,
            therefore this can be used if the aspect of the axes changes.
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and names
should be fairly self explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
self._use_clabeltext = kwargs.get('use_clabeltext', False)
# Detect if manual selection is desired and remove from argument list
self.labelManual = kwargs.get('manual', False)
self.rightside_up = kwargs.get('rightside_up', True)
if len(args) == 0:
levels = self.levels
indices = list(xrange(len(self.cvalues)))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
self.labelFontProps.set_size(fontsize)
font_size_pts = self.labelFontProps.get_size_in_points()
self.labelFontSizeList = [font_size_pts] * len(levels)
if _colors is None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = list(xrange(len(self.labelLevelList)))
self.labelMappable = cm.ScalarMappable(cmap=cmap,
norm=colors.NoNorm())
self.labelXYs = []
if cbook.iterable(self.labelManual):
for x, y in self.labelManual:
self.add_label_near(x, y, inline,
inline_spacing)
elif self.labelManual:
print('Select label locations manually using first mouse button.')
print('End manual selection with second mouse button.')
if not inline:
print('Remove last label by clicking third mouse button.')
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline, inline_spacing)
else:
self.labels(inline, inline_spacing)
# Hold on to some old attribute names. These are deprecated and will
# be removed in the near future (sometime after 2008-08-01), but
# keeping for now for backwards compatibility
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
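    # Hedged usage sketch for the clabel API documented above (illustrative
    # only, not part of the original source):
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #
    #     x = y = np.linspace(-3, 3, 101)
    #     X, Y = np.meshgrid(x, y)
    #     Z = np.exp(-X ** 2 - Y ** 2)
    #     cs = plt.contour(X, Y, Z)
    #     plt.clabel(cs, inline=True, fontsize=8, fmt='%1.2f')
    #     plt.show()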
def print_label(self, linecontour, labelwidth):
"Return *False* if contours are too short for a label."
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return True
xmax = np.amax(linecontour[:, 0])
xmin = np.amin(linecontour[:, 0])
ymax = np.amax(linecontour[:, 1])
ymin = np.amin(linecontour[:, 1])
lw = labelwidth
if (xmax - xmin) > 1.2 * lw or (ymax - ymin) > 1.2 * lw:
return True
else:
return False
def too_close(self, x, y, lw):
"Return *True* if a label is already near this location."
for loc in self.labelXYs:
d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
if d < 1.2 * lw:
return True
return False
def get_label_coords(self, distances, XX, YY, ysize, lw):
"""
Return x, y, and the index of a label location.
Labels are plotted at a location with the smallest
deviation of the contour from a straight line
unless there is another label nearby, in which case
the next best place on the contour is picked up.
If all such candidates are rejected, the beginning
of the contour is chosen.
"""
hysize = int(ysize / 2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x, y, lw):
continue
return x, y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x, y, ind
def get_label_width(self, lev, fmt, fsize):
"""
Return the width of the label in points.
"""
if not cbook.is_string_like(lev):
lev = self.get_text(lev, fmt)
lev, ismath = text.Text.is_math_text(lev)
if ismath == 'TeX':
if not hasattr(self, '_TeX_manager'):
self._TeX_manager = texmanager.TexManager()
lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
fsize)
elif ismath:
if not hasattr(self, '_mathtext_parser'):
self._mathtext_parser = mathtext.MathTextParser('bitmap')
img, _ = self._mathtext_parser.parse(lev, dpi=72,
prop=self.labelFontProps)
lw = img.get_width() # at dpi=72, the units are PostScript points
else:
# width is much less than "font size"
lw = (len(lev)) * fsize * 0.6
return lw
def get_real_label_width(self, lev, fmt, fsize):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
This method is not being used, and may be modified or removed.
"""
# Find middle of axes
xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
# Temporarily create text object
t = text.Text(xx[0], xx[1])
self.set_label_props(t, self.get_text(lev, fmt), 'k')
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
# difference in pixel extent of image
lw = np.diff(bbox.corners()[0::2, 0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt, dict):
return fmt[lev]
elif six.callable(fmt):
return fmt(lev)
else:
return fmt % lev
def locate_label(self, linecontour, labelwidth):
"""
Find a good place to plot a label (relatively flat
part of the contour).
"""
nsize = len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize / labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = int(labelwidth)
XX = np.resize(linecontour[:, 0], (xsize, ysize))
YY = np.resize(linecontour[:, 1], (xsize, ysize))
# I might have fouled up the following:
yfirst = YY[:, 0].reshape(xsize, 1)
ylast = YY[:, -1].reshape(xsize, 1)
xfirst = XX[:, 0].reshape(xsize, 1)
xlast = XX[:, -1].reshape(xsize, 1)
s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
# There must be a more efficient way...
lc = [tuple(l) for l in linecontour]
dind = lc.index((x, y))
return x, y, dind
def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None:
lc = []
# Half the label width
hlw = lw / 2.0
# Check if closed and, if so, rotate contour so label is at edge
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[slc[ind:-1], slc[:ind + 1]]
if len(lc): # Rotate lc also if not empty
lc = np.r_[lc[ind:-1], lc[:ind + 1]]
ind = 0
# Path length in pixel space
pl = mlab.path_length(slc)
pl = pl - pl[ind]
# Use linear interpolation to get points around label
xi = np.array([-hlw, hlw])
if closed: # Look at end also for closed contours
dp = np.array([pl[-1], 0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,
extrap=True)
# get vector in pixel space coordinates from one point to other
dd = np.diff(ll, axis=0).ravel()
# Get angle of vector - must be calculated in pixel space for
# text rotation to work correctly
if np.all(dd == 0): # Must deal with case of zero length label
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
if self.rightside_up:
# Fix angle so text is never upside-down
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
# Break contour if desired
nlc = []
if len(lc):
# Expand range by spacing
xi = dp + xi + np.array([-spacing, spacing])
# Get indices near points of interest
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False)
# If those indices aren't beyond contour edge, find x,y
if (not np.isnan(I[0])) and int(I[0]) != I[0]:
xy1 = mlab.less_simple_linear_interpolation(
pl, lc, [xi[0]])
if (not np.isnan(I[1])) and int(I[1]) != I[1]:
xy2 = mlab.less_simple_linear_interpolation(
pl, lc, [xi[1]])
# Round to integer values but keep as float
# To allow check against nan below
I = [np.floor(I[0]), np.ceil(I[1])]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append(np.r_[xy2, lc[int(I[1]):int(I[0]) + 1], xy1])
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append(np.r_[lc[:int(I[0]) + 1], xy1])
if not np.isnan(I[1]):
nlc.append(np.r_[xy2, lc[int(I[1]):]])
# The current implementation removes contours completely
# covered by labels. Uncomment line below to keep
# original contour if this is the preferred behavior.
# if not len(nlc): nlc = [ lc ]
return rotation, nlc
def _get_label_text(self, x, y, rotation):
dx, dy = self.ax.transData.inverted().transform_point((x, y))
t = text.Text(dx, dy, rotation=rotation,
horizontalalignment='center',
verticalalignment='center')
return t
def _get_label_clabeltext(self, x, y, rotation):
        # x, y, rotation are given in pixel coordinates. Convert them to
        # data coordinates and create a label using the ClabelText
        # class. This way, the rotation of the clabel always follows the
        # contour line.
transDataInv = self.ax.transData.inverted()
dx, dy = transDataInv.transform_point((x, y))
drotation = transDataInv.transform_angles(np.array([rotation]),
np.array([[x, y]]))
t = ClabelText(dx, dy, rotation=drotation[0],
horizontalalignment='center',
verticalalignment='center')
return t
def _add_label(self, t, x, y, lev, cvalue):
color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
_text = self.get_text(lev, self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x, y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def add_label(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`~matplotlib.text.Text` class.
"""
t = self._get_label_text(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`ClabelText` class.
"""
        # x, y, rotation are given in pixel coordinates. Convert them to
        # data coordinates and create a label using the ClabelText
        # class. This way, the rotation of the clabel always follows the
        # contour line.
t = self._get_label_clabeltext(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_near(self, x, y, inline=True, inline_spacing=5,
transform=None):
"""
Add a label near the point (x, y). If transform is None
(default), (x, y) is in data coordinates; if transform is
False, (x, y) is in display coordinates; otherwise, the
specified transform will be used to translate (x, y) into
display coordinates.
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
"""
if transform is None:
transform = self.ax.transData
if transform:
x, y = transform.transform_point((x, y))
# find the nearest contour _in screen units_
conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
x, y, self.labelIndiceList)[:5]
# The calc_label_rot_and_inline routine requires that (xmin,ymin)
# be a vertex in the path. So, if it isn't, add a vertex here
# grab the paths from the collections
paths = self.collections[conmin].get_paths()
# grab the correct segment
active_path = paths[segmin]
        # grab its vertices
lc = active_path.vertices
        # sort out where the new vertex should be added, in data units
xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])
# if there isn't a vertex close enough
if not np.allclose(xcmin, lc[imin]):
# insert new data into the vertex list
lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
# replace the path with the new one
paths[segmin] = mpath.Path(lc)
# Get index of nearest level in subset of levels used for labeling
lmin = self.labelIndiceList.index(conmin)
# Coordinates of contour
paths = self.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = self.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = self.get_label_width(self.labelLevelList[lmin],
self.labelFmt, self.labelFontSizeList[lmin])
# Figure out label rotation.
if inline:
lcarg = lc
else:
lcarg = None
rotation, nlc = self.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
inline_spacing)
self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
self.labelCValueList[lmin])
if inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n) > 1:
paths.append(mpath.Path(n))
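    # Hedged sketch (illustrative only): passing an iterable of (x, y) tuples
    # as the *manual* kwarg of clabel ends up calling add_label_near for each
    # pair, e.g. with a ContourSet ``cs`` and data coordinates:
    #
    #     cs.clabel(manual=[(0.5, 0.5), (-1.0, 1.5)])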
def pop_label(self, index=-1):
"""Defaults to removing last label, but any index can be supplied"""
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
if self._use_clabeltext:
add_label = self.add_label_clabeltext
else:
add_label = self.add_label
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList,
self.labelFontSizeList, self.labelCValueList):
con = self.collections[icon]
trans = con.get_transform()
lw = self.get_label_width(lev, self.labelFmt, fsize)
lw *= self.ax.figure.dpi / 72.0 # scale to screen coordinates
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon(lc):
slc = np.r_[slc0, slc0[1:2, :]]
else:
slc = slc0
# Check if long enough for a label
if self.print_label(slc, lw):
x, y, ind = self.locate_label(slc, lw)
if inline:
lcarg = lc
else:
lcarg = None
rotation, new = self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing)
# Actually add the label
add_label(x, y, rotation, lev, cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n) > 1:
additions.append(mpath.Path(n))
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
def _find_closest_point_on_leg(p1, p2, p0):
"""find closest point to p0 on line segment connecting p1 and p2"""
# handle degenerate case
if np.all(p2 == p1):
d = np.sum((p0 - p1)**2)
return d, p1
d21 = p2 - p1
d01 = p0 - p1
# project on to line segment to find closest point
proj = np.dot(d01, d21) / np.dot(d21, d21)
if proj < 0:
proj = 0
if proj > 1:
proj = 1
pc = p1 + proj * d21
# find squared distance
d = np.sum((pc-p0)**2)
return d, pc
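# Worked check of the helper above (assumed values, for illustration): with
# p1 = (0, 0), p2 = (2, 0) and p0 = (1, 1), the projection parameter is
# dot((1, 1), (2, 0)) / dot((2, 0), (2, 0)) = 0.5, so the closest point is
# (1, 0) and the squared distance returned is 1.0.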
def _find_closest_point_on_path(lc, point):
"""
lc: coordinates of vertices
point: coordinates of test point
"""
# find index of closest vertex for this segment
ds = np.sum((lc - point[None, :])**2, 1)
imin = np.argmin(ds)
dmin = np.inf
xcmin = None
legmin = (None, None)
closed = mlab.is_closed_polygon(lc)
# build list of legs before and after this vertex
legs = []
if imin > 0 or closed:
legs.append(((imin-1) % len(lc), imin))
if imin < len(lc) - 1 or closed:
legs.append((imin, (imin+1) % len(lc)))
for leg in legs:
d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
if d < dmin:
dmin = d
xcmin = xc
legmin = leg
return (dmin, xcmin, legmin)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
The axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See :meth:`_process_colors`.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is *False* (default) or *True*.
The first three arguments must be:
*ax*: axes object.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the contour
levels.
*allsegs*: [level0segs, level1segs, ...]
List of all the polygon segments for all the *levels*.
For contour lines ``len(allsegs) == len(levels)``, and for
filled contour regions ``len(allsegs) = len(levels)-1``.
level0segs = [polygon0, polygon1, ...]
polygon0 = array_like [[x0,y0], [x1,y1], ...]
*allkinds*: *None* or [level0kinds, level1kinds, ...]
Optional list of all the polygon vertex kinds (code types), as
described and used in Path. This is used to allow multiply-
connected paths such as holes within filled polygons.
If not *None*, len(allkinds) == len(allsegs).
level0kinds = [polygon0kinds, ...]
polygon0kinds = [vertexcode0, vertexcode1, ...]
If *allkinds* is not *None*, usually all polygons for a particular
contour level are grouped together so that
level0segs = [polygon0] and level0kinds = [polygon0kinds].
Keyword arguments are as described in
:class:`~matplotlib.contour.QuadContourSet` object.
**Examples:**
.. plot:: mpl_examples/misc/contour_manual.py
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', None)
self.hatches = kwargs.get('hatches', [None])
self.alpha = kwargs.get('alpha', None)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
vmin = kwargs.get('vmin', None)
vmax = kwargs.get('vmax', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', None)
if self.antialiased is None and self.filled:
self.antialiased = False # eliminate artifacts; we are not
# stroking the boundaries.
# The default for line contours will be taken from
# the LineCollection default, which uses the
# rcParams['lines.antialiased']
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
            if self.extend != 'neither':
                raise ValueError('extend kwarg does not work yet with log '
                                 'scale')
else:
self.logscale = False
if self.origin not in [None, 'lower', 'upper', 'image']:
raise ValueError("If given, *origin* must be one of [ 'lower' |"
" 'upper' | 'image']")
if self.extent is not None and len(self.extent) != 4:
raise ValueError("If given, *extent* must be '[ *None* |"
" (x0,x1,y0,y1) ]'")
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image':
self.origin = mpl.rcParams['image.origin']
self._transform = kwargs.get('transform', None)
self._process_args(*args, **kwargs)
self._process_levels()
if self.colors is not None:
ncolors = len(self.levels)
if self.filled:
ncolors -= 1
i0 = 0
# Handle the case where colors are given for the extended
# parts of the contour.
extend_min = self.extend in ['min', 'both']
extend_max = self.extend in ['max', 'both']
use_set_under_over = False
# if we are extending the lower end, and we've been given enough
# colors then skip the first color in the resulting cmap. For the
# extend_max case we don't need to worry about passing more colors
# than ncolors as ListedColormap will clip.
total_levels = ncolors + int(extend_min) + int(extend_max)
if (len(self.colors) == total_levels and
any([extend_min, extend_max])):
use_set_under_over = True
if extend_min:
i0 = 1
cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)
if use_set_under_over:
if extend_min:
cmap.set_under(self.colors[0])
if extend_max:
cmap.set_over(self.colors[-1])
if self.filled:
self.collections = cbook.silent_list('mcoll.PathCollection')
else:
self.collections = cbook.silent_list('mcoll.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
# sets self.cmap, norm if needed;
cm.ScalarMappable.__init__(self, **kw)
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self._process_colors()
self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
# Lower and upper contour levels.
lowers, uppers = self._get_lowers_and_uppers()
# Ensure allkinds can be zipped below.
if self.allkinds is None:
self.allkinds = [None] * len(self.allsegs)
for level, level_upper, segs, kinds in \
zip(lowers, uppers, self.allsegs, self.allkinds):
paths = self._make_paths(segs, kinds)
# Default zorder taken from Collection
zorder = kwargs.get('zorder', 1)
col = mcoll.PathCollection(
paths,
antialiaseds=(self.antialiased,),
edgecolors='none',
alpha=self.alpha,
transform=self.get_transform(),
zorder=zorder)
self.ax.add_collection(col, autolim=False)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
aa = self.antialiased
if aa is not None:
aa = (self.antialiased,)
for level, width, lstyle, segs in \
zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
# Default zorder taken from LineCollection
zorder = kwargs.get('zorder', 2)
col = mcoll.LineCollection(
segs,
antialiaseds=aa,
linewidths=width,
linestyles=[lstyle],
alpha=self.alpha,
transform=self.get_transform(),
zorder=zorder)
col.set_label('_nolegend_')
self.ax.add_collection(col, autolim=False)
self.collections.append(col)
self.changed() # set the colors
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this ContourSet.
"""
if self._transform is None:
self._transform = self.ax.transData
elif (not isinstance(self._transform, mtrans.Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.ax)
return self._transform
def __getstate__(self):
state = self.__dict__.copy()
# the C object _contour_generator cannot currently be pickled. This
# isn't a big issue as it is not actually used once the contour has
# been calculated.
state['_contour_generator'] = None
return state
def legend_elements(self, variable_name='x', str_format=str):
"""
Return a list of artist and labels suitable for passing through
to :func:`plt.legend` which represent this ContourSet.
Args:
*variable_name*: the string used inside the inequality used
on the labels
*str_format*: function used to format the numbers in the labels
"""
artists = []
labels = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
n_levels = len(self.collections)
for i, (collection, lower, upper) in enumerate(
zip(self.collections, lowers, uppers)):
patch = mpatches.Rectangle(
(0, 0), 1, 1,
facecolor=collection.get_facecolor()[0],
hatch=collection.get_hatch(),
alpha=collection.get_alpha())
artists.append(patch)
lower = str_format(lower)
upper = str_format(upper)
if i == 0 and self.extend in ('min', 'both'):
labels.append(r'$%s \leq %s$' % (variable_name,
lower))
elif i == n_levels - 1 and self.extend in ('max', 'both'):
labels.append(r'$%s > %s$' % (variable_name,
upper))
else:
labels.append(r'$%s < %s \leq %s$' % (lower,
variable_name,
upper))
else:
for collection, level in zip(self.collections, self.levels):
patch = mcoll.LineCollection(None)
patch.update_from(collection)
artists.append(patch)
# format the level for insertion into the labels
level = str_format(level)
labels.append(r'$%s = %s$' % (variable_name, level))
return artists, labels
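    # Hedged usage sketch for legend_elements (illustrative only): with a
    # filled ContourSet ``cs`` from plt.contourf, a proxy legend can be built:
    #
    #     artists, labels = cs.legend_elements(variable_name='z',
    #                                          str_format='{:.2f}'.format)
    #     plt.legend(artists, labels, loc='upper right')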
def _process_args(self, *args, **kwargs):
"""
Process *args* and *kwargs*; override in derived classes.
Must set self.levels, self.zmin and self.zmax, and update axes
limits.
"""
self.levels = args[0]
self.allsegs = args[1]
self.allkinds = len(args) > 2 and args[2] or None
self.zmax = np.amax(self.levels)
self.zmin = np.amin(self.levels)
self._auto = False
# Check lengths of levels and allsegs.
if self.filled:
if len(self.allsegs) != len(self.levels) - 1:
                raise ValueError('there must be one fewer segment list than '
                                 'levels for filled contours')
else:
if len(self.allsegs) != len(self.levels):
raise ValueError('must be same number of segments as levels')
# Check length of allkinds.
if (self.allkinds is not None and
len(self.allkinds) != len(self.allsegs)):
raise ValueError('allkinds has different length to allsegs')
# Determine x,y bounds and update axes data limits.
havelimits = False
for segs in self.allsegs:
for seg in segs:
seg = np.asarray(seg)
if havelimits:
min = np.minimum(min, seg.min(axis=0))
max = np.maximum(max, seg.max(axis=0))
else:
min = seg.min(axis=0)
max = seg.max(axis=0)
havelimits = True
if havelimits:
self.ax.update_datalim([min, max])
self.ax.autoscale_view(tight=True)
def _get_allsegs_and_allkinds(self):
"""
Override in derived classes to create and return allsegs and allkinds.
allkinds can be None.
"""
return self.allsegs, self.allkinds
def _get_lowers_and_uppers(self):
"""
Return (lowers,uppers) for filled contours.
"""
lowers = self._levels[:-1]
if self.zmin == lowers[0]:
# Include minimum values in lowest interval
lowers = lowers.copy() # so we don't change self._levels
if self.logscale:
lowers[0] = 0.99 * self.zmin
else:
lowers[0] -= 1
uppers = self._levels[1:]
return (lowers, uppers)
def _make_paths(self, segs, kinds):
if kinds is not None:
return [mpath.Path(seg, codes=kind)
for seg, kind in zip(segs, kinds)]
else:
return [mpath.Path(seg) for seg in segs]
def changed(self):
tcolors = [(tuple(rgba),)
for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
hatches = self.hatches * len(tcolors)
for color, hatch, collection in zip(tcolors, hatches,
self.collections):
if self.filled:
collection.set_facecolor(color)
# update the collection's hatch (may be None)
collection.set_hatch(hatch)
else:
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
"""
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
"""
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N + 1)
zmax = self.zmax
zmin = self.zmin
lev = self.locator.tick_values(zmin, zmax)
self._auto = True
if self.filled:
return lev
# For line contours, drop levels outside the data range.
return lev[(lev > zmin) & (lev < zmax)]
def _contour_level_args(self, z, args):
"""
Determine the contour levels and store in self.levels.
"""
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
self._auto = False
if self.levels is None:
if len(args) == 0:
lev = self._autolev(z, 7)
else:
level_arg = args[0]
try:
if type(level_arg) == int:
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except:
raise TypeError(
"Last %s arg must give levels; see help(%s)" %
(fn, fn))
self.levels = lev
if self.filled and len(self.levels) < 2:
raise ValueError("Filled contours require at least 2 levels.")
if len(self.levels) > 1 and np.amin(np.diff(self.levels)) <= 0.0:
if hasattr(self, '_corner_mask') and self._corner_mask == 'legacy':
warnings.warn("Contour levels are not increasing")
else:
raise ValueError("Contour levels must be increasing")
def _process_levels(self):
"""
Assign values to :attr:`layers` based on :attr:`levels`,
adding extended layers as needed if contours are filled.
For line contours, layers simply coincide with levels;
a line is a thin layer. No extended levels are needed
with line contours.
"""
# The following attributes are no longer needed, and
# should be deprecated and removed to reduce confusion.
self.vmin = np.amin(self.levels)
self.vmax = np.amax(self.levels)
# Make a private _levels to include extended regions; we
# want to leave the original levels attribute unchanged.
# (Colorbar needs this even for line contours.)
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1], self.zmax) + 1)
self._levels = np.asarray(self._levels)
if not self.filled:
self.layers = self.levels
return
# layer values are mid-way between levels
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
# ...except that extended layers must be outside the
# normed range:
if self.extend in ('both', 'min'):
self.layers[0] = -np.inf
if self.extend in ('both', 'max'):
self.layers[-1] = np.inf
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels
and layers, not on the actual range of the Z values. This
means we don't have to worry about bad values in Z, and we
always have the full dynamic range available for the selected
levels.
The color is based on the midpoint of the layer, except for
extended end layers. By default, the norm vmin and vmax
are the extreme values of the non-extended levels. Hence,
the layer color extremes are not the extreme values of
the colormap itself, but approach those values as the number
of levels increases. An advantage of this scheme is that
line contours, when added to filled contours, take on
colors that are consistent with those of the filled regions;
for example, a contour line on the boundary between two
regions will have a color intermediate between those
of the regions.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
# Generate integers for direct indexing.
i0, i1 = 0, len(self.levels)
if self.filled:
i1 -= 1
# Out of range indices for over and under:
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 += 1
self.cvalues = list(range(i0, i1))
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
self.set_array(self.levels)
self.autoscale_None()
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
# self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
else:
if not cbook.iterable(linewidths):
linewidths = [linewidths] * Nlev
else:
linewidths = list(linewidths)
if len(linewidths) < Nlev:
nreps = int(np.ceil(Nlev / len(linewidths)))
linewidths = linewidths * nreps
if len(linewidths) > Nlev:
linewidths = linewidths[:Nlev]
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
neg_ls = mpl.rcParams['contour.negative_linestyle']
eps = - (self.zmax - self.zmin) * 1e-15
for i, lev in enumerate(self.levels):
if lev < eps:
tlinestyles[i] = neg_ls
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev / len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def get_alpha(self):
"""returns alpha to be applied to all ContourSet artists"""
return self.alpha
def set_alpha(self, alpha):
"""sets alpha for all ContourSet artists"""
self.alpha = alpha
self.changed()
def find_nearest_contour(self, x, y, indices=None, pixel=True):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments:
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
if indices is None:
indices = list(xrange(len(self.levels)))
dmin = np.inf
conmin = None
segmin = None
xmin = None
ymin = None
point = np.array([x, y])
for icon in indices:
con = self.collections[icon]
trans = con.get_transform()
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = trans.transform(lc)
d, xc, leg = _find_closest_point_on_path(lc, point)
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = leg[1]
xmin = xc[0]
ymin = xc[1]
return (conmin, segmin, imin, xmin, ymin, dmin)
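    # Hedged usage sketch (illustrative only): locate the contour segment
    # closest to a point given in pixel/screen coordinates, e.g. from a
    # mouse event:
    #
    #     conmin, segmin, imin, xmin, ymin, dmin = cs.find_nearest_contour(
    #         event.x, event.y, pixel=True)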
class QuadContourSet(ContourSet):
"""
Create and store a set of contour lines or filled regions.
User-callable method: :meth:`clabel`
Useful attributes:
ax:
The axes object in which the contours are drawn
collections:
A silent_list of LineCollections or PolyCollections
levels:
Contour levels
layers:
Same as levels for line contours; half-way between
levels for filled contours. See :meth:`_process_colors` method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Calculate and draw contour lines or filled regions, depending
on whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in QuadContourSet.contour_doc.
"""
ContourSet.__init__(self, ax, *args, **kwargs)
def _process_args(self, *args, **kwargs):
"""
Process args and kwargs.
"""
if isinstance(args[0], QuadContourSet):
if self.levels is None:
self.levels = args[0].levels
self.zmin = args[0].zmin
self.zmax = args[0].zmax
self._corner_mask = args[0]._corner_mask
if self._corner_mask == 'legacy':
contour_generator = args[0].Cntr
else:
contour_generator = args[0]._contour_generator
else:
self._corner_mask = kwargs.get('corner_mask', None)
if self._corner_mask is None:
self._corner_mask = mpl.rcParams['contour.corner_mask']
x, y, z = self._contour_args(args, kwargs)
_mask = ma.getmask(z)
if _mask is ma.nomask or not _mask.any():
_mask = None
if self._corner_mask == 'legacy':
cbook.warn_deprecated('1.5',
name="corner_mask='legacy'",
alternative='corner_mask=False or True')
contour_generator = _cntr.Cntr(x, y, z.filled(), _mask)
else:
contour_generator = _contour.QuadContourGenerator(
x, y, z.filled(), _mask, self._corner_mask, self.nchunk)
t = self.get_transform()
# if the transform is not trans data, and some part of it
# contains transData, transform the xs and ys to data coordinates
if (t != self.ax.transData and
any(t.contains_branch_seperately(self.ax.transData))):
trans_to_data = t - self.ax.transData
pts = (np.vstack([x.flat, y.flat]).T)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0, y0), (x1, y1)])
self.ax.autoscale_view(tight=True)
if self._corner_mask == 'legacy':
self.Cntr = contour_generator
else:
self._contour_generator = contour_generator
def _get_allsegs_and_allkinds(self):
"""
Create and return allsegs and allkinds by calling underlying C code.
"""
allsegs = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
allkinds = []
for level, level_upper in zip(lowers, uppers):
if self._corner_mask == 'legacy':
nlist = self.Cntr.trace(level, level_upper,
nchunk=self.nchunk)
nseg = len(nlist) // 2
vertices = nlist[:nseg]
kinds = nlist[nseg:]
else:
vertices, kinds = \
self._contour_generator.create_filled_contour(
level, level_upper)
allsegs.append(vertices)
allkinds.append(kinds)
else:
allkinds = None
for level in self.levels:
if self._corner_mask == 'legacy':
nlist = self.Cntr.trace(level)
nseg = len(nlist) // 2
vertices = nlist[:nseg]
else:
vertices = self._contour_generator.create_contour(level)
allsegs.append(vertices)
return allsegs, allkinds
def _contour_args(self, args, kwargs):
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
args = args[1:]
elif Nargs <= 4:
x, y, z = self._check_xyz(args[:3], kwargs)
args = args[3:]
else:
raise TypeError("Too many arguments to %s; see help(%s)" %
(fn, fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <= 0 have been masked')
self.zmin = z.min()
self._contour_level_args(z, args)
return (x, y, z)
def _check_xyz(self, args, kwargs):
"""
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
"""
x, y = args[:2]
self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.ax.convert_xunits(x)
y = self.ax.convert_yunits(y)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
else:
Ny, Nx = z.shape
if x.ndim != y.ndim:
raise TypeError("Number of dimensions of x and y should match.")
if x.ndim == 1:
nx, = x.shape
ny, = y.shape
if nx != Nx:
raise TypeError("Length of x must be number of columns in z.")
if ny != Ny:
raise TypeError("Length of y must be number of rows in z.")
x, y = np.meshgrid(x, y)
elif x.ndim == 2:
if x.shape != z.shape:
raise TypeError("Shape of x does not match that of z: found "
"{0} instead of {1}.".format(x.shape, z.shape))
if y.shape != z.shape:
raise TypeError("Shape of y does not match that of z: found "
"{0} instead of {1}.".format(y.shape, z.shape))
else:
raise TypeError("Inputs x and y must be 1D or 2D.")
return x, y, z
def _initialize_x_y(self, z):
"""
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
"""
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0, x1, y0, y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0, x1, y0, y1 = (0, Nx, 0, Ny)
else:
x0, x1, y0, y1 = self.extent
dx = float(x1 - x0) / Nx
dy = float(y1 - y0) / Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x, y)
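    # Worked illustration of the conventions above (assumed values): for a 2x3
    # Z with extent=(0, 3, 0, 2) and origin='lower', dx = dy = 1.0, so the
    # pixel centers are x = [0.5, 1.5, 2.5] and y = [0.5, 1.5]; with
    # origin='upper' the y array is reversed to [1.5, 0.5], matching imshow.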
contour_doc = """
Plot contours.
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the MATLAB
version in that it does not draw the polygon edges.
To draw edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
Call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (x, y) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour up to *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*,
which must be in increasing order.
::
contourf(..., V)
fill the ``len(V)-1`` regions between the values in *V*,
which must be in increasing order.
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X* and *Y* must both be 2-D with the same shape as *Z*, or they
must both be 1-D such that ``len(X)`` is the number of columns in
*Z* and ``len(Y)`` is the number of rows in *Z*.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.QuadContourSet` object.
Optional keyword arguments:
*corner_mask*: [ *True* | *False* | 'legacy' ]
Enable/disable corner masking, which only has an effect if *Z* is
a masked array. If *False*, any quad touching a masked point is
masked out. If *True*, only the triangular corners of quads
nearest those points are always masked out, other triangular
corners comprising three unmasked points are contoured as usual.
If 'legacy', the old contouring algorithm is used, which is
equivalent to *False* and is deprecated, only remaining whilst the
new algorithm is tested fully.
If not specified, the default is taken from
rcParams['contour.corner_mask'], which is True unless it has
been modified.
*colors*: [ *None* | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ *None* | Colormap ]
A cm :class:`~matplotlib.colors.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*vmin*, *vmax*: [ *None* | scalar ]
If not *None*, either or both of these values will be
supplied to the :class:`matplotlib.colors.Normalize`
instance, overriding the default color scaling based on
*levels*.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw, in increasing order; e.g., to draw just
the zero contour pass ``levels=[0]``
*origin*: [ *None* | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ *None* | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ *None* | ticker.Locator subclass ]
If *locator* is *None*, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.colors.Colormap.set_under` and
:meth:`matplotlib.colors.Colormap.set_over` methods.
*xunits*, *yunits*: [ *None* | registered units ]
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
*antialiased*: [ *True* | *False* ]
enable antialiasing, overriding the defaults. For
filled contours, the default is *True*. For line contours,
it is taken from rcParams['lines.antialiased'].
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of *nchunk* by *nchunk* quads.
Chunking reduces the maximum length of polygons generated by the
contouring algorithm which reduces the rendering workload passed
on to the backend and also requires slightly less RAM. It can
however introduce rendering artifacts at chunk boundaries depending
on the backend, the *antialiased* flag and value of *alpha*.
contour-only keyword arguments:
*linewidths*: [ *None* | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified.
*linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, the default is 'solid' unless
the lines are monochrome. In that case, negative
contours will take their linestyle from the ``matplotlibrc``
``contour.negative_linestyle`` setting.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
contourf-only keyword arguments:
*hatches*:
A list of cross hatch patterns to use on the filled areas.
If None, no hatching will be added to the contour.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Note: contourf fills intervals that are closed at the top; that
is, for boundaries *z1* and *z2*, the filled region is::
z1 < z <= z2
There is one exception: if the lowest boundary coincides with
the minimum value of the *z* array, then that minimum value
will be included in the lowest interval.
**Examples:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
.. plot:: mpl_examples/pylab_examples/contourf_demo.py
.. plot:: mpl_examples/pylab_examples/contour_corner_mask.py
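A minimal self-contained sketch (synthetic data and default styling; not one
of the shipped demos above)::

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(-3, 3, 101)
    X, Y = np.meshgrid(x, x)
    Z = np.exp(-(X**2 + Y**2))          # a single Gaussian bump
    cs = plt.contour(X, Y, Z, levels=[0.2, 0.5, 0.8], colors='k')
    plt.clabel(cs, inline=True)         # label the three level curves
    plt.show()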
"""
| apache-2.0 |
sshleifer/object_detection_kitti | learned_optimizer/problems/datasets.py | 7 | 7404 | # Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate or load datasets for supervised learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
from sklearn.datasets import make_classification
MAX_SEED = 4294967295
class Dataset(namedtuple("Dataset", "data labels")):
"""Helper class for managing a supervised learning dataset.
Args:
data: an array of type float32 with N samples, each of which is the set
of features for that sample. (Shape (N, D_i), where N is the number of
samples and D_i is the number of features for that sample.)
labels: an array of type int32 or int64 with N elements, indicating the
class label for the corresponding set of features in data.
"""
# Since this is an immutable object, we don't need to reserve slots.
__slots__ = ()
@property
def size(self):
"""Dataset size (number of samples)."""
return len(self.data)
def batch_indices(self, num_batches, batch_size):
"""Creates indices of shuffled minibatches.
Args:
num_batches: the number of batches to generate
batch_size: the size of each batch
Returns:
batch_indices: a list of minibatch indices, arranged so that the dataset
is randomly shuffled.
Raises:
ValueError: if the data and labels have different lengths
"""
if len(self.data) != len(self.labels):
raise ValueError("Labels and data must have the same number of samples.")
batch_indices = []
# Follows logic in mnist.py to ensure we cover the entire dataset.
index_in_epoch = 0
dataset_size = len(self.data)
dataset_indices = np.arange(dataset_size)
np.random.shuffle(dataset_indices)
for _ in range(num_batches):
start = index_in_epoch
index_in_epoch += batch_size
if index_in_epoch > dataset_size:
# Finished epoch, reshuffle.
np.random.shuffle(dataset_indices)
# Start next epoch.
start = 0
index_in_epoch = batch_size
end = index_in_epoch
batch_indices.append(dataset_indices[start:end].tolist())
return batch_indices
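# A hedged usage sketch (not part of the original module): build a tiny
# in-memory Dataset of zeros and draw shuffled minibatch indices from it.
# The helper name and the placeholder arrays are illustrative only.
def _example_batch_indices():
    """Return minibatch index lists for a 10-sample toy Dataset."""
    toy = Dataset(data=np.zeros((10, 3), dtype="float32"),
                  labels=np.zeros(10, dtype="int32"))
    return toy.batch_indices(num_batches=4, batch_size=5)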
def noisy_parity_class(n_samples,
n_classes=2,
n_context_ids=5,
noise_prob=0.25,
random_seed=None):
"""Returns a randomly generated sparse-to-sparse dataset.
The label is a parity class of a set of context classes.
Args:
n_samples: number of samples (data points)
n_classes: number of class labels (default: 2)
n_context_ids: how many classes to take the parity of (default: 5).
noise_prob: how often to corrupt the label (default: 0.25)
random_seed: seed used for drawing the random data (default: None)
Returns:
dataset: A Dataset namedtuple containing the generated data and labels
"""
np.random.seed(random_seed)
x = np.random.randint(0, n_classes, [n_samples, n_context_ids])
noise = np.random.binomial(1, noise_prob, [n_samples])
y = (np.sum(x, 1) + noise) % n_classes
return Dataset(x.astype("float32"), y.astype("int32"))
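# Worked example with made-up numbers: for one sample with context ids
# [1, 0, 1, 1, 0] and n_classes=2, the clean label is sum([1, 0, 1, 1, 0]) % 2
# = 3 % 2 = 1; if the Bernoulli noise draw for that sample is 1, the stored
# label flips to (3 + 1) % 2 = 0.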
def random(n_features, n_samples, n_classes=2, sep=1.0, random_seed=None):
"""Returns a randomly generated classification dataset.
Args:
n_features: number of features (dependent variables)
n_samples: number of samples (data points)
n_classes: number of class labels (default: 2)
sep: separation of the two classes, a higher value corresponds to
an easier classification problem (default: 1.0)
random_seed: seed used for drawing the random data (default: None)
Returns:
dataset: A Dataset namedtuple containing the generated data and labels
"""
# Generate the problem data.
x, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=n_features,
n_redundant=0,
n_classes=n_classes,
class_sep=sep,
random_state=random_seed)
return Dataset(x.astype("float32"), y.astype("int32"))
def random_binary(n_features, n_samples, random_seed=None):
"""Returns a randomly generated dataset of binary values.
Args:
n_features: number of features (dependent variables)
n_samples: number of samples (data points)
random_seed: seed used for drawing the random data (default: None)
Returns:
dataset: A Dataset namedtuple containing the generated data and labels
"""
random_seed = (np.random.randint(MAX_SEED) if random_seed is None
else random_seed)
np.random.seed(random_seed)
x = np.random.randint(2, size=(n_samples, n_features))
y = np.zeros((n_samples, 1))
return Dataset(x.astype("float32"), y.astype("int32"))
def random_symmetric(n_features, n_samples, random_seed=None):
"""Returns a randomly generated dataset of values and their negatives.
Args:
n_features: number of features (dependent variables)
n_samples: number of samples (data points)
random_seed: seed used for drawing the random data (default: None)
Returns:
dataset: A Dataset namedtuple containing the generated data and labels
"""
random_seed = (np.random.randint(MAX_SEED) if random_seed is None
else random_seed)
np.random.seed(random_seed)
x1 = np.random.normal(size=(int(n_samples/2), n_features))
x = np.concatenate((x1, -x1), axis=0)
y = np.zeros((n_samples, 1))
return Dataset(x.astype("float32"), y.astype("int32"))
def random_mlp(n_features, n_samples, random_seed=None, n_layers=6, width=20):
"""Returns a generated output of an MLP with random weights.
Args:
n_features: number of features (dependent variables)
n_samples: number of samples (data points)
random_seed: seed used for drawing the random data (default: None)
n_layers: number of layers in random MLP
width: width of the layers in random MLP
Returns:
dataset: A Dataset namedtuple containing the generated data and labels
"""
random_seed = (np.random.randint(MAX_SEED) if random_seed is None
else random_seed)
np.random.seed(random_seed)
x = np.random.normal(size=(n_samples, n_features))
y = x
n_in = n_features
scale_factor = np.sqrt(2.) / np.sqrt(n_features)
for _ in range(n_layers):
weights = np.random.normal(size=(n_in, width)) * scale_factor
y = np.dot(y, weights).clip(min=0)
n_in = width
y = y[:, 0]
y[y > 0] = 1
return Dataset(x.astype("float32"), y.astype("int32"))
EMPTY_DATASET = Dataset(np.array([], dtype="float32"),
np.array([], dtype="int32"))
| apache-2.0 |
shusenl/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
ianknowles/EarTimeWrangler | src/PDF/table_transformer.py | 1 | 19834 | import logging
import math
import os
from collections import defaultdict
import pdfminer
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
from pdfminer.pdfparser import PDFParser, PDFDocument
import PDF.rectangles
import eartime_table
logger = logging.getLogger('meeting_parser').getChild(__name__)
def extract_table(pdf_path):
layouts = get_page_layouts(pdf_path)
tables = [page_to_tables(page_layout) for page_layout in layouts]
stitched = new_stitch(tables)
return table_review2(stitched)
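# Hedged usage sketch -- the path below is a placeholder rather than a file
# that ships with this repository, and what each returned object exposes is
# defined by eartime_table.Table outside this module:
#
#     for table in extract_table('data/some_department/meetings.pdf'):
#         print(table)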
def new_stitch(tables):
main_table = []
for page in tables:
for n, table in enumerate(page):
# try to join lines that run over table breaks
if (n == 0 and
#len(table['title']) == 0 and
# len(list(filter(lambda s: s == '', table[1][0]))) > 0 and
table['cells'][0][0] == '' and
#table['cells'][0][1] == '' and
#table['cells'][0][3] == '' and
len(main_table) > 0):
logger.info('Stitching rows:' + str(main_table[-1]) + str(table))
rep = main_table[-1]['cells'][-1][0]
if any(map(lambda x: x == '', table['cells'][0][1:])):
for i in range(min(len(main_table[-1]['cells'][-1]), len(table['cells'][0]))):
main_table[-1]['cells'][-1][i] += ' '
main_table[-1]['cells'][-1][i] += table['cells'][0][i]
for row in table['cells'][1:]:
main_table[-1]['cells'].append(row)
if main_table[-1]['cells'][-1][0] == '':
main_table[-1]['cells'][-1][0] = rep
else:
for row in table['cells'][1:]:
main_table[-1]['cells'].append(row)
if main_table[-1]['cells'][-1][0] == '':
main_table[-1]['cells'][-1][0] = rep
logger.info('Stitched result:' + str(main_table[-1]))
else:
main_table.append(table)
return main_table
def table_review2(tables):
tabs = []
for tab in tables:
tabs.append(eartime_table.Table(tab['title'], tab['cells']))
return tabs
def table_review(tables):
for table in tables:
# can we do this without copying the cells? pointer to first row is easy, pointer to the rest?
cells = list(table['cells'])
table['header'] = cells.pop(0)
table['rows'] = cells
table['tabletype'] = 'unknown'
#first = table['header'][0].lower()
#print(first)
finds = table['header'][0].lower().find('date')
if table['header'][0].lower().find('date') >= 0 or (table['header'][0].lower() == 'date of meeting'):
#if len(main_table2[-1]['rows']) > 0:
# if 'title' not in main_table2[-1]:
# main_table2[-1]['title'] = ['']
# logger.warning('Warning could not find title:' + str(main_table2[-1]))
# main_table2.append({})
# main_table2[-1]['title'] = main_table2[-2]['title']
# main_table2[-1]['header'] = []
# main_table2[-1]['rows'] = []
# main_table2[-1]['tabletype'] = main_table2[-2]['tabletype']
if len(table['header']) >= 3:
if table['header'][2].lower().strip() == 'purpose of meeting':
logger.info('Probably found a meeting header row:' + str(table['header']))
table['tabletype'] = 'meeting'
#for row in table['rows']:
# if all(map(lambda s: s == '', row)):
# logger.debug('Discarded empty row')
# table['rows']
table['rows'] = list(filter(lambda r: not all(map(lambda s: s == '', r)), table['rows']))
#elif len(list(filter(lambda s: s == '', row))) > 0:
# logger.warning('Discarded partial row, need to review:' + str(row))
#elif len(row) == 1:
# logger.info('Probably found a title:' + str(row))
# if len(main_table2[-1]['rows']) > 0:
# main_table2.append({})
# main_table2[-1]['header'] = main_table2[-2]['header']
# main_table2[-1]['rows'] = []
# main_table2[-1]['tabletype'] = main_table2[-2]['tabletype']
# main_table2[-1]['title'] = row
#else:
# main_table2[-1]['rows'].append(row)
if 'title' not in table or table['title'] == "":
table['title'] = ''
logger.warning('Warning could not find title:' + str(table))
return tables
# old funcs
def get_tables(pdf_path):
dept = os.path.basename(os.path.dirname(pdf_path))
return [page_to_table(page_layout) for page_layout in get_page_layouts(pdf_path)]
def stitch_together_tables(tables):
main_table = []
main_table2 = []
for page in tables:
for n, line in enumerate(page):
# try to join lines that run over table breaks
if (n == 0 and
len(line) > 1 and
len(list(filter(lambda s: s == '', line))) > 0 and
len(main_table) > 0):
#logger.debug('Stitching rows:' + str(main_table[-1]) + str(line))
for i in range(min(len(main_table[-1]), len(line))):
main_table[-1][i] += ' '
main_table[-1][i] += line[i]
#logger.debug('Stitched result:' + str(main_table[-1]))
else:
main_table.append(line)
tabletype = ''
main_table2.append({})
main_table2[-1]['header'] = []
main_table2[-1]['rows'] = []
main_table2[-1]['tabletype'] = 'unknown'
for row in main_table:
if row[0].lower().find('date') >= 0 or (row[0].lower() == 'date of meeting'):
if len(main_table2[-1]['rows']) > 0:
if 'title' not in main_table2[-1]:
main_table2[-1]['title'] = ['']
logger.warning('Warning could not find title:' + str(main_table2[-1]))
main_table2.append({})
main_table2[-1]['title'] = main_table2[-2]['title']
main_table2[-1]['header'] = []
main_table2[-1]['rows'] = []
main_table2[-1]['tabletype'] = main_table2[-2]['tabletype']
if len(row) >= 3:
if row[2].lower() == 'purpose of meeting':
logger.info('Probably found a meeting header row:' + str(row))
main_table2[-1]['tabletype'] = 'meeting'
main_table2[-1]['header'].append(row)
else:
logger.info('Probably found a header row:' + str(row))
main_table2[-1]['header'].append(row)
else:
logger.info('Probably found a header row:' + str(row))
main_table2[-1]['header'].append(row)
elif all(map(lambda s: s == '', row)):
logger.debug('Discarded empty row')
elif len(list(filter(lambda s: s == '', row))) > 0:
logger.warning('Discarded partial row, need to review:' + str(row))
elif len(row) == 1:
logger.info('Probably found a title:' + str(row))
if len(main_table2[-1]['rows']) > 0:
main_table2.append({})
main_table2[-1]['header'] = main_table2[-2]['header']
main_table2[-1]['rows'] = []
main_table2[-1]['tabletype'] = main_table2[-2]['tabletype']
main_table2[-1]['title'] = row
else:
main_table2[-1]['rows'].append(row)
if 'title' not in main_table2[-1]:
main_table2[-1]['title'] = ['']
logger.warning('Warning could not find title:' + str(main_table2[-1]))
return main_table2
def get_page_layouts(pdf_pathname):
try:
with open(pdf_pathname, 'rb') as fp:
parser = PDFParser(fp)
doc = PDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
doc.initialize("")
if not doc.is_extractable:
raise PDFTextExtractionNotAllowed
resource_manager = PDFResourceManager()
device = PDFPageAggregator(resource_manager, laparams=LAParams())
interpreter = PDFPageInterpreter(resource_manager, device)
layouts = []
for page in doc.get_pages():
interpreter.process_page(page)
layouts.append(device.get_result())
logger.debug('Opened PDF with ' + str(len(layouts)) + ' pages')
return layouts
except EnvironmentError:
logger.error('Invalid PDF file: ' + pdf_pathname)
return []
except TypeError:
logger.error('Invalid PDF file causes problems with pdfminer: ' + pdf_pathname)
return []
def parse_test(pdfpath):
page_layouts = get_page_layouts(pdfpath)
print(len(page_layouts))
print(page_layouts)
objects_on_page = set(type(o) for o in page_layouts[3])
print(objects_on_page)
current_page = page_layouts[3]
texts = []
rects = []
# separate text and rectangle elements
for e in current_page:
if isinstance(e, pdfminer.layout.LTTextBoxHorizontal):
texts.append(e)
elif isinstance(e, pdfminer.layout.LTRect):
rects.append(e)
# break the text elements down into their individual characters
characters = extract_characters(texts)
print(characters)
TEXT_ELEMENTS = [
pdfminer.layout.LTTextBox,
pdfminer.layout.LTTextBoxHorizontal,
pdfminer.layout.LTTextLine,
pdfminer.layout.LTTextLineHorizontal
]
def flatten(lst):
"""Flattens a list of lists"""
return [subelem for elem in lst for subelem in elem]
def extract_characters(element):
"""
Recursively extracts individual characters from
text elements.
"""
if isinstance(element, pdfminer.layout.LTChar):
return [element]
if any(isinstance(element, i) for i in TEXT_ELEMENTS):
return flatten([extract_characters(e) for e in element])
if isinstance(element, list):
return flatten([extract_characters(l) for l in element])
return []
gui_rects = []
chars = []
def get_rects():
return gui_rects
def get_chars():
return chars
def page_to_tables(page_layout):
texts = []
rectangles = []
other = []
logger.debug("Starting page")
for e in page_layout:
if isinstance(e, pdfminer.layout.LTTextBoxHorizontal):
texts.append(e)
elif isinstance(e, pdfminer.layout.LTRect):
rectangles.append(e)
else:
other.append(e)
characters = extract_characters(texts)
# Should set the limit from a reference, e.g. the area of a character in the smallest font
rects_by_size = filter(lambda r: ((r.height * r.width) > 5), rectangles)
#rects_by_size = list(filter(lambda x: x.width > 5, rects_by_size))
#rects_by_size = sorted(rects_by_size, key=lambda r: r.height * r.width, reverse=True)
rects_by_x = sorted(rects_by_size, key=lambda r: r.x0)
table_rects = []
remaining_rects = list(rects_by_x)
groups = []
while remaining_rects:
groups.append(PDF.rectangles.RectangleGroup(remaining_rects.pop()))
groups[-1].add_intersecting_rects(remaining_rects)
logger.debug("Grouping done")
# try filter(remaining_rects, intersects) ? keep filtering until len(0) result
#final_rects = table_rects
#for r in final_rects
#groups = []
#for k, g in itertools.groupby(rects_by_size, lambda x: x[0]):
# groups.append(list(g))
#gui_rects = list(groups)
#TODO page by page view
#gui_rects.append(groups)
bboxs = []
tables = []
for group in groups:
table_chars = list(filter(lambda x: PDF.rectangles.intersects(x, group.bbox), characters))
#TODO page by page view
#chars.append(table_chars)
tables.append([group, table_chars])
tables.reverse()
processed_tables = []
for table in tables:
processed_tables = processed_tables + process_table(table)
return list(filter(lambda x: len(x['cells']) > 0, processed_tables))
def process_table(table):
#import matplotlib.pyplot as plt
bbox = table[0].bbox
rects = table[0].rects
tchars = table[1]
bottom_line = pdfminer.layout.LTRect(1, (bbox.x0, bbox.y1, bbox.x1, bbox.y1))
top_line = pdfminer.layout.LTRect(1, (bbox.x0, bbox.y0, bbox.x1, bbox.y0))
left_line = pdfminer.layout.LTRect(1, (bbox.x0, bbox.y0, bbox.x0, bbox.y1))
#rects = list(filter(lambda x: x.width > 5, rects))
bottom_row = list(filter(lambda x: PDF.rectangles.intersects(x, bottom_line), rects))
bottom_row = sorted(bottom_row, key=lambda r: r.x0)
top_row = list(filter(lambda x: PDF.rectangles.intersects(x, top_line), rects))
top_row = sorted(top_row, key=lambda r: r.x0)
top_row = list(filter(lambda x: x.width > 5, top_row))
bottom_row2 = list(filter(lambda x: x.y1 >= bbox.y1 - 10, rects))
bottom_row2 = sorted(bottom_row2, key=lambda r: r.x0)
top_row2 = list(filter(lambda x: x.y0 <= bbox.y0 + 10, rects))
top_row2 = sorted(top_row2, key=lambda r: r.x0)
columns = []
rows = []
for c in top_row:
columns.append(pdfminer.layout.LTRect(1, (c.x0 - 1, bbox.y0 - 1, c.x1 + 1, bbox.y1 + 1)))
if len(columns) == 0:
logger.debug("Couldn't find any cols in this table, review")
return []
first_column = list(filter(lambda x: PDF.rectangles.contains(columns[0], x), rects))
first_column = list(filter(lambda x: PDF.rectangles.intersects(x, left_line), first_column))
first_column = list(filter(lambda x: x.width < 5, first_column))
first_column = sorted(first_column, key=lambda r: r.y0)
for r in first_column:
rows.append(pdfminer.layout.LTRect(1, (bbox.x0 - 1, r.y0 - 1, bbox.x1 + 1, r.y1 + 1)))
rows.reverse()
if len(rows) == 0:
logger.debug("Couldn't find any rows in this table, review")
return []
#cell0 = list(filter(lambda x: PDF.rectangles.contains(columns[0], x) and PDF.rectangles.contains(rows[0], x), tchars))
#cell0 = sorted(cell0, key=lambda r: r.y0)
title_row = ""
#i, j = 0
tables = []
table_cells = []
header = False
for row in rows:
title = False
rowend = False
table_row = []
for col in columns:
cell_right = pdfminer.layout.LTRect(1, (col.x1 - 2, row.y0 - 2, col.x1 + 2, row.y1 + 2))
for rect in rects:
if PDF.rectangles.contains(cell_right, rect):
rowend = True
break
if not rowend:
title = True
cell_rect = pdfminer.layout.LTRect(1, (row.x0 - 2, row.y0 - 2, row.x1 + 2, row.y1 + 2))
else:
cell_rect = pdfminer.layout.LTRect(1, (col.x0 - 2, row.y0 - 2, col.x1 + 2, row.y1 + 2))
cell_chars = [char for char in tchars if PDF.rectangles.contains(cell_rect, char)]
tchars[:] = [char for char in tchars if not PDF.rectangles.contains(cell_rect, char)]
cell_chars = sorted(cell_chars, key=lambda x: x.y0)
cell_chars.reverse()
import itertools
string = ''
for k, g in itertools.groupby(cell_chars, lambda x: x.y0):
t = sorted(g, key=lambda x: x.x0)
string += ''.join(list(map(lambda u: u.get_text(), t)))
if title:
title_row = ''.join(string).strip()
break
else:
table_row.append(string.strip())
t = eartime_table.Table('', [table_row])
if t.tabletype == 'meeting' and header:
# new table
tables.append({"title": title_row, "cells": table_cells})
table_cells = [title_row] + table_row
elif t.tabletype == 'meeting':
header = True
table_cells.append([title_row] + table_row)
#if header row start new table
elif not title:
table_cells.append([title_row] + table_row)
tables.append({"title": title_row, "cells": table_cells})
#print(title_row)
#for r in first_column:
#plt.plot(*zip(*r.pts))
#plt.show()
#print()
#plt.show()
#print()
return tables
def compact(groups, remaining_rects):
while remaining_rects:
r = remaining_rects.pop()
for group in groups:
for rect in group:
if PDF.rectangles.intersects(r, rect):
print(str(r) + " intersects " + str(rect))
group.append(r)
break
else:
continue
break
else:
print("found no existing group, starting a new one")
groups.append([r])
return groups
def old_compact_start(rects_by_x, table_rects):
for rect in rects_by_x:
rects_by_x.remove(rect)
found = False
for r in table_rects:
for r1 in r:
if PDF.rectangles.intersects(r1, rect):
r.append(rect)
found = True
break
if found:
break
if not found:
for rect2 in rects_by_x:
#if rect == rect2:
# continue
if PDF.rectangles.intersects(rect2, rect):
table_rects.append([rect, rect2])
rects_by_x.remove(rect2)
table_rects, compacted = compact_groups(table_rects)
while compacted:
table_rects, compacted = compact_groups(table_rects)
def compact_groups(groups):
compacted = False
for group in groups:
for group2 in groups:
found = False
if group2 != group:
for rect in group:
for rect2 in group2:
if PDF.rectangles.intersects(rect2, rect):
group += group2
groups.remove(group2)
found = True
compacted = True
break
if found:
break
return groups, compacted
def page_to_table(page_layout):
# TODO: the table stitching belongs in here
texts = []
rects = []
other = []
for e in page_layout:
if isinstance(e, pdfminer.layout.LTTextBoxHorizontal):
texts.append(e)
elif isinstance(e, pdfminer.layout.LTRect):
rects.append(e)
else:
other.append(e)
# convert text elements to characters
# and rectangles to lines
characters = extract_characters(texts)
#chars.append(characters)
lines = [cast_as_line(r) for r in rects
if width(r) < 2 and
area(r) > 1]
# match each character to a bounding rectangle where possible
box_char_dict = {}
for c in characters:
# choose the bounding box that occurs the majority of times for each of these:
bboxes = defaultdict(int)
l_x, l_y = c.bbox[0], c.bbox[1]
bbox_l = find_bounding_rectangle(l_x, l_y, lines)
bboxes[bbox_l] += 1
c_x, c_y = math.floor((c.bbox[0] + c.bbox[2]) / 2), math.floor((c.bbox[1] + c.bbox[3]) / 2)
bbox_c = find_bounding_rectangle(c_x, c_y, lines)
bboxes[bbox_c] += 1
u_x, u_y = c.bbox[2], c.bbox[3]
bbox_u = find_bounding_rectangle(u_x, u_y, lines)
bboxes[bbox_u] += 1
# if all values are in different boxes, default to character center.
# otherwise choose the majority.
if max(bboxes.values()) == 1:
bbox = bbox_c
else:
bbox = max(bboxes.items(), key=lambda x: x[1])[0]
if bbox is None:
continue
if bbox in box_char_dict.keys():
box_char_dict[bbox].append(c)
continue
box_char_dict[bbox] = [c]
# look for empty bounding boxes by scanning
# over a grid of values on the page
for x in range(100, 550, 10):
for y in range(50, 800, 10):
bbox = find_bounding_rectangle(x, y, lines)
if bbox is None:
continue
if bbox in box_char_dict.keys():
continue
box_char_dict[bbox] = []
return boxes_to_table(box_char_dict)
def flatten(lst):
return [subelem for elem in lst for subelem in elem]
def extract_characters(element):
if isinstance(element, pdfminer.layout.LTChar):
return [element]
if any(isinstance(element, i) for i in TEXT_ELEMENTS):
elements = []
for e in element:
elements += extract_characters(e)
return elements
if isinstance(element, list):
return flatten([extract_characters(l) for l in element])
return []
def width(rect):
x0, y0, x1, y1 = rect.bbox
return min(x1 - x0, y1 - y0)
def length(rect):
x0, y0, x1, y1 = rect.bbox
return max(x1 - x0, y1 - y0)
def area(rect):
x0, y0, x1, y1 = rect.bbox
return (x1 - x0) * (y1 - y0)
def cast_as_line(rect):
x0, y0, x1, y1 = rect.bbox
if x1 - x0 > y1 - y0:
return (x0, y0, x1, y0, "H")
else:
return (x0, y0, x0, y1, "V")
def does_it_intersect(x, xmin, xmax):
return (x <= xmax and x >= xmin)
def find_bounding_rectangle(x, y, lines):
v_intersects = [l for l in lines
if l[4] == "V"
and does_it_intersect(y, l[1], l[3])]
h_intersects = [l for l in lines
if l[4] == "H"
and does_it_intersect(x, l[0], l[2])]
if len(v_intersects) < 2 or len(h_intersects) < 2:
return None
v_left = [v[0] for v in v_intersects
if v[0] < x]
v_right = [v[0] for v in v_intersects
if v[0] > x]
if len(v_left) == 0 or len(v_right) == 0:
return None
x0, x1 = max(v_left), min(v_right)
h_down = [h[1] for h in h_intersects
if h[1] < y]
h_up = [h[1] for h in h_intersects
if h[1] > y]
if len(h_down) == 0 or len(h_up) == 0:
return None
y0, y1 = max(h_down), min(h_up)
return (x0, y0, x1, y1)
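# Illustrative example with made-up coordinates: given vertical lines at x=10
# and x=50 spanning y=[0, 100] and horizontal lines at y=20 and y=80 spanning
# x=[0, 100], find_bounding_rectangle(30, 40, lines) returns (10, 20, 50, 80),
# i.e. the tightest grid cell enclosing the query point.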
def chars_to_string(chars):
if not chars:
return ""
rows = sorted(list(set(c.bbox[1] for c in chars)), reverse=True)
text = ""
for row in rows:
sorted_row = sorted([c for c in chars if c.bbox[1] == row], key=lambda c: c.bbox[0])
text += "".join(c.get_text() for c in sorted_row)
return text
def boxes_to_table(box_record_dict):
boxes = box_record_dict.keys()
rows = sorted(list(set(b[1] for b in boxes)), reverse=True)
table = []
for row in rows:
sorted_row = sorted([b for b in boxes if b[1] == row], key=lambda b: b[0])
table.append([chars_to_string(box_record_dict[b]).strip() for b in sorted_row])
return table
def find_tables(rects):
rects_by_size = sorted(rects, key=lambda r: area(r)) | mit |
JT5D/scikit-learn | examples/grid_search_digits.py | 8 | 2665 | """
=====================================================================
Parameter estimation using grid search with a nested cross-validation
=====================================================================
This example shows how a classifier is optimized by "nested"
cross-validation, which is done using the
:class:`sklearn.grid_search.GridSearchCV` object on a development set
that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_estimator_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
fbagirov/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
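# The loop below applies Lee & Seung's multiplicative updates (eps keeps the
# denominators away from zero); in matrix form they read
#     H <- H * (W^T V) / (W^T W H)
#     W <- W * (V H^T) / (W H H^T)
# which is exactly what updateH and updateW compute element-wise.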
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
bartromgens/climatemaps | bin/create_contour.py | 1 | 4856 | #!/usr/bin/env python3
import sys
import os
import math
import numpy
import matplotlib.pyplot as plt
sys.path.append('../climatemaps')
import climatemaps
from climatemaps.logger import logger
DATA_OUT_DIR = 'website/data'
TYPES = {
'precipitation': {
'filepath': 'data/precipitation/cpre6190.dat',
'conversion_factor': 0.1, # (millimetres/day) *10
'config': climatemaps.contour.ContourPlotConfig(0.1, 16, colormap=plt.cm.jet_r, title='Precipitation', unit='mm/day', logscale=True)
},
'cloud': {
'filepath': 'data/cloud/ccld6190.dat',
'conversion_factor': 1,
'config': climatemaps.contour.ContourPlotConfig(0, 100, colormap=plt.cm.jet_r, title='Cloud coverage', unit='%')
},
'mintemp': {
'filepath': 'data/mintemp/ctmn6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(-40, 28, colormap=plt.cm.jet, title='Min. temperature', unit='C')
},
'meantemp': {
'filepath': 'data/meantemp/ctmp6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(-30, 35, colormap=plt.cm.jet, title='Mean temperature', unit='C')
},
'maxtemp': {
'filepath': 'data/maxtemp/ctmx6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(-20, 45, colormap=plt.cm.jet, title='Max. temperature', unit='C')
},
'diurnaltemprange': {
'filepath': 'data/diurnaltemprange/cdtr6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(5, 20, colormap=plt.cm.jet, title='Diurnal temperature range', unit='C')
},
'wetdays': {
'filepath': 'data/wetdays/cwet6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(0, 30, colormap=plt.cm.jet_r, title='Wet days', unit='days')
},
'wind': {
'filepath': 'data/wind/cwnd6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(0, 9, colormap=plt.cm.jet, title='Wind speed', unit='m/s')
},
'radiation': {
'filepath': 'data/radiation/crad6190.dat',
'conversion_factor': 1.0,
'config': climatemaps.contour.ContourPlotConfig(0, 300, colormap=plt.cm.jet, title='Radiation', unit='W/m^2')
},
'vapourpressure': {
'filepath': 'data/vapourpressure/cvap6190.dat',
'conversion_factor': 0.1,
'config': climatemaps.contour.ContourPlotConfig(1, 34, colormap=plt.cm.jet, title='Vapour pressure', unit='hPa')
},
}
def main():
month_upper = 12
n_data_sets = len(TYPES) * month_upper
counter = 0
for data_type, settings in TYPES.items():
for month in range(1, month_upper+1):
logger.info('create image and tiles for "' + data_type + '" and month ' + str(month))
progress = counter/n_data_sets*100.0
logger.info("progress: " + str(int(progress)) + '%')
latrange, lonrange, Z = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
contourmap = climatemaps.contour.Contour(settings['config'], lonrange, latrange, Z)
contourmap.create_contour_data(
DATA_OUT_DIR,
data_type,
month,
figure_dpi=1200
)
counter += 1
# for month in range(1, 13):
# create_optimal_map(month)
def create_optimal_map(month):
settings = TYPES['precipitation']
latrange, lonrange, Zpre = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
settings = TYPES['cloud']
latrange, lonrange, Zcloud = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
settings = TYPES['maxtemp']
latrange, lonrange, Ztmax = climatemaps.data.import_climate_data(settings['filepath'], month, settings['conversion_factor'])
for x in numpy.nditer(Zpre, op_flags=['readwrite']):
if x/16.0 > 1.0:
x[...] = 0.0
else:
x[...] = 1.0 - x/16.0
for x in numpy.nditer(Ztmax, op_flags=['readwrite']):
temp_ideal = 22
x[...] = 1.0 - math.pow((x-temp_ideal)/10.0, 2)
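# Worked example with illustrative values: a monthly max temperature of 22 C
# scores 1.0, 17 C or 27 C score 1 - (5/10)**2 = 0.75, and 32 C scores 0.0;
# temperatures further from the ideal go negative, dragging down the combined
# score Z, which is clipped at zero further below.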
Zscore_cloud = (100 - Zcloud)/100
Z = (Zpre + Zscore_cloud + Ztmax) / 3.0 * 10.0
for x in numpy.nditer(Z, op_flags=['readwrite']):
x[...] = max(x, 0.0)
config = climatemaps.contour.ContourPlotConfig(0.0, 9.0, colormap=plt.cm.RdYlGn, unit='')
contourmap = climatemaps.contour.Contour(config, lonrange, latrange, Z)
contourmap.create_contour_data(
DATA_OUT_DIR,
'optimal',
month,
figure_dpi=1000
)
print('month done: ' + str(month))
if __name__ == "__main__":
main()
| mit |
Lucas-Armand/genetic-algorithm | dev/6ºSemana/testes of speed.py | 5 | 3255 | # -*- coding: utf-8 -*-
import os
import csv
import random
import numpy as np
import timeit
import time as Time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from itertools import product, combinations
class Block:
def __init__(self,point,a,b,c,weight,btype):
self.p=point
self.a=a
self.b=b
self.c=c
self.w=weight
self.t=btype
def csv_read(name):  # Reading method: turns a CSV file into a vector (list of rows)
CSV=open(name,'r')
dados=CSV.read()
dados=dados.replace(',','.')
dados=dados.replace(';',',')
CSV.close()
CSV=open("temp.csv",'w')
CSV.write(dados)
CSV.close()
CSV=open("temp.csv",'r')
dados=csv.reader(CSV)
v=[]
for i in dados:
I=[]
for j in i:
try:
j = float(j)
except:
pass
I.append(j)
v.append(I)
CSV.close()
os.remove("temp.csv")
return (v)
def defineGeometry(name):
vect = csv_read(name)
blockNumber ={}
for i in vect:
a = i[1]
b = i[2]
c = i[3]
point = [i[4],i[5],i[6]]
weight = i[7]
btype = i[-1]
block = Block(point,a,b,c,weight,btype)
blockNumber[i[0]] = block
return blockNumber
bNumb=defineGeometry('GeometriaNavio.csv')
# Define vicinity
#deck
vicinity={1:[2]}
for i in range(2,16):
vicinity[i] = [i-1,i+1]
vicinity[16] = [15]
#side
vicinity[17] = [18,19]
vicinity[18] = [17,20]
for i in range(19,31):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[31] = [29,32]
vicinity[32] = [30,31]
#bott
vicinity[33] = [34,35]
vicinity[34] = [33,36]
for i in range(35,63):
v = i-1 if i%2==0 else i+1
vicinity[i] = [v,i-2,i+2]
vicinity[63] = [61,64]
vicinity[64] = [63,62]
#coff
vicinity[65] = [66]
for i in range(66,70):
vicinity[i] = [i-1,i+1]
vicinity[70] = [69]
alfa = 10
beta = 1
built = []
time = 0
append = built.append
def order(x): return vicinity[x]
def time(bNumb,vicinity,chromo):
t_time = Time.time()
alfa = 1
built = []
time = 0
append = built.append
def time_vector(x,y):
for i in y:
if i in built:
time = alfa
break
try: time  # 'time' is only bound if a neighbouring block was already built
except NameError: time = 10*alfa  # isolated block: full assembly penalty
append(x)
return time
vic = [vicinity[x] for x in chromo]
time = sum((time_vector(x,y) for x,y in zip(chromo,vic)))
return time
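# In other words (illustrative reading of the heuristic above): the first
# block of the chromosome always costs 10*alfa because nothing is built yet;
# every later block costs alfa when at least one of its neighbours is already
# erected and 10*alfa when it is placed in isolation.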
chromo = [44, 39, 56, 47, 49, 37, 42, 46, 51, 58, 60, 62, 52, 41, 35, 33, 50, 61, 54, 34, 59, 43, 48, 45, 55, 53, 38, 57, 64, 67, 68, 63, 40, 36, 21, 66, 22, 6, 20, 65, 18, 5, 17, 69, 28, 27, 70, 29, 1, 12, 30, 13, 14, 26, 31, 24, 19, 2, 3, 4, 25, 11, 32, 10, 15, 16, 9, 23, 7, 8]
import cProfile
cProfile.run('time(bNumb,vicinity,chromo)')
##
##print timeit.timeit(setup='from __main__ import chromo;'+
## 'from __main__ import bNumb;'+
## 'from __main__ import time;'+
## 'from __main__ import vicinity '
## ,stmt='time(bNumb,vicinity,chromo)')
#print t.timeit(number = 1000000)
| gpl-3.0 |
tosolveit/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/ensemble/plot_isolation_forest.py | 39 | 2361 | """
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produces shorter path
lengths for particular samples, those samples are highly likely to be
anomalies.
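Concretely, the anomaly score proposed in [1] is
s(x, n) = 2 ** (-E(h(x)) / c(n)), where E(h(x)) is the path length of sample
x averaged over the trees and c(n) is the average path length of an
unsuccessful binary-search-tree search on n points, used as a normalising
constant: scores close to 1 flag anomalies, while scores well below 0.5 flag
normal points.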
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
tayebzaidi/HonorsThesisTZ | ThesisCode/DES_Pipeline/gen_lightcurves/visualizeLCurves.py | 1 | 3137 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import json
import os
import sys
import numpy as np
import math
import pickle
def main():
path = "./des_sn.p"
output_lightcurves_file = 'selectedLightcurves'
output_lightcurves = []
with open(path, 'rb') as f:
lightcurves = pickle.load(f)
filenames = list(lightcurves.keys())
#Randomize the file order to allow for fairer selection of the sub-sample
filenames = np.random.permutation(filenames)
num_files = len(filenames)
j = 0
for filename in filenames:
j += 1
objname = str(filename)
file_data = lightcurves[filename]
#Ignore all non-CSP or CfA entries
#for k in list(file_data.keys()):
# if not (k.endswith('CSP') or ('CfA' in k)):
# del file_data[k]
if len(file_data) == 0:
continue
N = len(file_data)
if N < 3:
cols = 1
else:
cols = 3
rows = int(math.ceil(N / cols))
gs = gridspec.GridSpec(rows, cols)
#fig = plt.figure(figsize=(10, 12))
#fig.suptitle(objname)
for i, filt in enumerate(file_data.keys()):
mjd = file_data[filt]['mjd']
mag = file_data[filt]['mag']
mag_err = file_data[filt]['dmag']
model_phase = file_data[filt]['modeldate']
model_mag = file_data[filt]['modelmag']
#bspline_mag = file_data[filt]['bsplinemag']
#modelmag_sub = file_data[filt]['modelmag_sub']
type = file_data[filt]['type']
#ax = fig.add_subplot(gs[i])
#ax.errorbar(mjd, mag, fmt='r', yerr=mag_err,label='Original Data', alpha=0.7)
#ymin, ymax = ax.get_ylim()
#ax.plot(model_phase, model_mag, '-k', label='GP Smoothed Data')
#ax.plot(model_phase, bspline_mag, '-g', label='Spline Smoothed Data')
#ax.plot(model_phase, modelmag_sub, '-k', label='GP/Bspline subtracted', linewidth=1.5)
#ax.set_ylim(ymin, ymax)
#Print outlier stats
mag_range = np.ptp(model_mag)
old_mag_range = np.ptp(mag)
#print(objname, filt)
#plt.draw()
#plt.pause(0.05)
print("Number of files currently: ", len(output_lightcurves))
print("Supernova Type: ", type)
#keystroke = input("<Hit Enter To Close>")
if j>2:
keystroke = 'q'
else:
print(i)
keystroke = '.'
if keystroke == '.':
output_lightcurves.append(objname)
elif keystroke == 'q':
print("Writing to file")
with open(output_lightcurves_file, 'w') as out:
for objname in output_lightcurves:
out.write(objname + '\n')
#plt.close()
sys.exit()
#plt.close()
with open(output_lightcurves_file, 'w') as out:
for objname in output_lightcurves:
out.write(objname + '\n')
if __name__=="__main__":
sys.exit(main())
| gpl-3.0 |
ChangLe008/iarc007_hitcsc | iarc/src/pilot/scripts/lidar.py | 2 | 2836 | #coding:UTF-8
"""
Created on 2017/04/07
@author: Leonidas
"""
import matplotlib.pyplot as plt
import string
import math
#ResultFormat = "%0.4f"
#Usage: ResultFormat%(1/3)
log_path = '/home/hitcsc/catkin_ws/log/iarc/lidar.txt'
a = open(log_path)
text = a.readlines()
obs_num = 0
class obs:
def __init__(self):
        self.real = 0  # reliability flag: 1 = valid detection, 0 = invalid/lost
self.x = 0
self.y = 0
self.serial = 0
self.lost_counter = 0
t_m = []
x = []
y = []
temp_x = []
temp_y = []
x0 = []
y0 = []
x1 = []
y1 = []
for ctdata in text:
ct = ctdata.split("\t")
t_m.append(string.atoi(ct[0]))
for item in range(int(string.atof(ct[0]))):
temp_x.append(string.atof(ct[1+item*2]))
temp_y.append(string.atof(ct[2+item*2]))
x.append(temp_x)
y.append(temp_y)
temp_x = []
temp_y = []
obstacle = []
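# The loop below appears to implement a simple nearest-neighbour tracker:
# in every frame each tracked obstacle is matched to its closest valid
# detection; a match within the distance threshold refreshes the track,
# otherwise lost_counter is incremented and the track is marked invalid
# after 10 consecutive misses.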
for lenth in range(len(t_m)):
abc_instance = [obs() for i in range(t_m[lenth])]
counter = 1
for i in range(t_m[lenth]):
if(math.sqrt(x[lenth][i]*x[lenth][i]+y[lenth][i]*y[lenth][i])<=0.5):
abc_instance[i].real=0
else:
abc_instance[i].real=1
abc_instance[i].x=x[lenth][i]
abc_instance[i].y=y[lenth][i]
abc_instance[i].serial=counter
counter = counter + 1
if len(obstacle)==0:
        obs_temp = 0
        for i in range(t_m[lenth]):
if abc_instance[i].real==1:
obstacle.append(abc_instance[i])
obstacle[obs_temp].serial= obs_temp+1
obs_temp = obs_temp+1
# elif counter == len(obstacle):
# 12
else:
temp_dis = []
for h in range(len(obstacle)):
for i in range(t_m[lenth]):
if abc_instance[i].real==1:
temp_dis.append(math.sqrt((abc_instance[i].x-obstacle[h].x)*(abc_instance[i].x-obstacle[h].x)+(abc_instance[i].y-obstacle[h].y)*(abc_instance[i].y-obstacle[h].y)))
else:
temp_dis.append(100000)
aa = temp_dis.index(min(temp_dis))
            # associate the nearest detection only if it lies within 0.05 m
            if temp_dis[aa] <= 0.05:
obstacle[h].x = abc_instance[aa].x
obstacle[h].y = abc_instance[aa].y
obstacle[h].lost_counter = 0
obstacle[h].real = 1
print obstacle[h].x,obstacle[h].y,h
elif obstacle[h].lost_counter>=10:
obstacle[h].real = 0
else:
obstacle[h].lost_counter = obstacle[h].lost_counter+1
temp_dis = []
# x0.append(obstacle[0].x)
# y0.append(obstacle[0].y)
x1.append(obstacle[2].x)
y1.append(obstacle[2].y)
# plt.plot(ttt,ul,'k',label='ul')
# plt.legend()
# plt.grid()
# plt.show()
# print xs
plt.plot(x1,y1,'r')
plt.plot(x0,y0,'b2')
plt.grid()
plt.show()
print abc_instance[0].lost_counter | bsd-2-clause |
yunfeilu/scikit-learn | examples/classification/plot_classifier_comparison.py | 66 | 4895 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
fermiPy/lcpipe | runLCWeekly.py | 1 | 1277 | import sys
from fermipy import utils
utils.init_matplotlib_backend()
from fermipy.gtanalysis import GTAnalysis
from fermipy.utils import *
import yaml
import pprint
import numpy
import argparse
def main():
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
gta = GTAnalysis(args.config)
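    # Standard fermipy analysis chain: prepare the binned data (setup), fit
    # the ROI model (optimize), refit the target position (localize), search
    # for additional point sources, compute the SED and a TS map, save the
    # ROI, and finally build a weekly (7-day binned) light curve.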
    if args.source is None:
        src_name = gta.roi.sources[0].name
    else:
        src_name = args.source
gta.setup()
gta.optimize()
loc = gta.localize(src_name, free_radius=1.0, update=True, make_plots=True)
model = {'Index' : 2.0, 'SpatialModel' : 'PointSource'}
srcs = gta.find_sources(model=model, sqrt_ts_threshold=5.0,
min_separation=0.5)
sed = gta.sed(src_name, free_radius=1.0, make_plots=True)
gta.tsmap(make_plots=True)
gta.write_roi('fit0')
lc = gta.lightcurve(src_name, binsz=86400.*7.0, free_radius=3.0, use_scaled_srcmap=True,
multithread=False)
if __name__ == "__main__":
main()
| bsd-3-clause |
MattNolanLab/ei-attractor | grid_cell_model/simulations/007_noise/figures/paper/ee_connections/config.py | 1 | 2573 |
'''Configuration file for the noise paper.'''
from __future__ import absolute_import, print_function
import os.path
import matplotlib.ticker as ti
from noisefigs.plotters.base import SeparateMultipageSaver
def get_config():
return _config
ROOT_DIR = ['simulation_data', 'ee_connections']
_config = {
'grids_data_root': os.path.join(*(ROOT_DIR + ['grids'])),
'bump_data_root': os.path.join(*(ROOT_DIR + ['gamma_bump'])),
'vel_data_root': os.path.join(*(ROOT_DIR + ['velocity'])),
'const_pos_data_root': None,
'singleDataRoot': None,
'GridExampleRectPlotter': {
'fig_saver': SeparateMultipageSaver(None, 'pdf')
},
'MainBumpFormationPlotter': {
'plot_grid_contours': [1, 1, 1],
},
'GammaSweepsPlotter': {
'plot_grid_contours': [0, 1, 0],
'AC_vmin': -0.12,
'AC_vmax': 0.68,
'F_vmin': 30,
'F_vmax': 121.5,
'F_cbar_kw': dict(
extend = 'neither',
),
},
'GammaExamplePlotter': {
'yscale_kw': [[
dict(
scaleLen=5,
unitsText='nA',
x=.5, y=.1,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=.05,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=.05,
size='x-small'
)],
[dict(
scaleLen=3,
unitsText='nA',
x=.5, y=.05,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=.05,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.55, y=0,
size='x-small'
)]],
},
'MaxPopulationFRSweepsPlotter': {
'plot_grid_contours': [1, 1, 1],
},
'PSeizureSweepPlotter': {
'plot_grid_contours': [1, 1, 1],
'FRThreshold': 300,
},
'BumpDriftAtTimePlotter': {
'plot_grid_contours': [1, 1, 1],
},
'VelFitErrSweepPlotter': {
'plot_contours': [1, 1, 1],
'vmin': 0.2,
'vmax': 13.93,
},
'VelFitStdSweepPlotter': {
'plot_contours': [1, 1, 1],
},
'VelSlopeSweepPlotter': {
'plot_contours': [1, 1, 1],
'vmin': -0.26,
'vmax': 1.26,
},
}
| gpl-3.0 |
Eward5513/oceanbase | oceanbase_0.4/tools/deploy/perf/1.py | 12 | 1857 | import datetime
import re
import sys
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
time_format = "%Y-%m-%d %H:%M:%S"
d = dict()
start_time = None
end_time = None
sql_count = 0
sql_time = 0
sql_time_dist = dict()
rpc_time = 0
urpc_time = 0
wait_time = 0
qps2time = dict()
rpc_times = []
urpc_times = []
wait_times = []
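# Parse each trace log line: pull out the per-query handle_sql_time,
# wait_sql_queue_time and rpc latency, and accumulate the totals used for the
# QPS and average-latency summary printed at the end (print_time is a
# microsecond timestamp, hence the 10**6 conversion below).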
for l in sys.stdin:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] latency=\[(\d+)\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(5))
if start_time is None:
start_time = end_time
trace_id = m.group(1)
ts = m.group(5)[:-6]
d[trace_id] = dict(
sql_time = int(m.group(2)),
wait_time = int(m.group(3)),
rpc_time = int(m.group(4)),
)
sql_count += 1
sql_time += d[trace_id]['sql_time']
if sql_time_dist.has_key(d[trace_id]['sql_time']):
sql_time_dist[d[trace_id]['sql_time']] += 1
else:
            sql_time_dist[d[trace_id]['sql_time']] = 1
wait_time += d[trace_id]['wait_time']
wait_times.append(d[trace_id]['wait_time'])
rpc_time += d[trace_id]['rpc_time']
rpc_times.append(d[trace_id]['rpc_time'])
if qps2time.has_key(ts):
qps2time[ts] += 1
else:
            qps2time[ts] = 1
elapsed_seconds = (end_time - start_time) / 10**6
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / sql_count
avg_rpc_time = float(rpc_time) / sql_count
avg_urpc_time = float(urpc_time) / sql_count
avg_wait_time = float(wait_time) / sql_count
print "QPS: %d" % (qps)
print "AVG TIME: %f" % (avg_sql_time)
print "AVG RPC TIME: %f" % (avg_rpc_time)
print "AVG WAIT TIME: %f" % (avg_wait_time)
| gpl-2.0 |
memo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 9 | 59208 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to 10 and the min slice size to be extremely small,
    # so as to ensure that 10 partitions are produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
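      # After the slice, `predictions` has shape [batch_size, 1] and holds the
      # predicted probability of class 1 for each example.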
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
toastedcornflakes/scikit-learn | examples/model_selection/plot_precision_recall.py | 74 | 6377 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], lw=lw, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"], color='gold', lw=lw,
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
plt.plot(recall[i], precision[i], color=color, lw=lw,
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/datasets/lfw.py | 28 | 17953 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
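# A minimal usage sketch of the two public loaders defined below (illustrative
# only; both trigger a download of roughly 200MB of data on first use):
#
#     >>> from sklearn.datasets import fetch_lfw_people, fetch_lfw_pairs
#     >>> people = fetch_lfw_people(min_faces_per_person=70)  # Face Recognition
#     >>> pairs = fetch_lfw_pairs(subset='train')             # Face Verification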
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib #for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person: int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
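    Examples
    --------
    A minimal sketch (triggers a download on first call; the shapes shown are
    indicative and depend on the options used):
        >>> from sklearn.datasets import fetch_lfw_people
        >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)  # doctest: +SKIP
        >>> lfw_people.images.shape  # doctest: +SKIP
        (1288, 50, 37)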
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
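    # Each line is expected to be tab-separated: matched pairs carry 3 fields
    # ("name idx1 idx2", same person) and mismatched pairs carry 4 fields
    # ("name1 idx1 name2 idx2"); this is inferred from the branching below.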
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
Parameters
----------
subset: optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home: optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
        Ratio used to resize each face picture.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels.
    pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
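    Examples
    --------
    A minimal sketch (triggers a download on first call; shapes are indicative):
        >>> from sklearn.datasets import fetch_lfw_pairs
        >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
        >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
        (2200, 2, 62, 47)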
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
jazcollins/models | cognitive_mapping_and_planning/scripts/script_plot_trajectory.py | 9 | 12900 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""
Code for plotting trajectories in the top view, and also plotting first-person views
from saved trajectories. Does not run the network but only loads the mesh data
to plot the view points.
CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \
--first_person --num_steps 40 \
--config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \
--imset test --alsologtostderr --base_dir output --out_dir vis
"""
import os, sys, numpy as np, copy
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.gridspec import GridSpec
import tensorflow as tf
from tensorflow.contrib import slim
import cv2
import logging
from tensorflow.python.platform import gfile
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from datasets import nav_env
import scripts.script_nav_agent_release as sna
import src.file_utils as fu
from src import graph_utils
from src import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output')
flags.DEFINE_string('type', '', 'Optional type.')
flags.DEFINE_bool('first_person', False, 'Visualize the first person view.')
flags.DEFINE_bool('top_view', False, 'Visualize the trajectory in the top view.')
flags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.')
flags.DEFINE_string('imset', 'test', '')
flags.DEFINE_string('base_dir', 'output', 'Cache directory.')
def _get_suffix_str():
return ''
def _load_trajectory():
base_dir = FLAGS.base_dir
config_name = FLAGS.config_name+_get_suffix_str()
dir_name = os.path.join(base_dir, FLAGS.type, config_name)
logging.info('Waiting for snapshot in directory %s.', dir_name)
last_checkpoint = slim.evaluation.wait_for_new_checkpoint(dir_name, None)
checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])
# Load the distances.
a = utils.load_variables(os.path.join(dir_name, 'bench_on_'+FLAGS.imset,
'all_locs_at_t_{:d}.pkl'.format(checkpoint_iter)))
return a
def _compute_hardness():
# Load the stanford data to compute the hardness.
if FLAGS.type == '':
args = sna.get_args_for_config(FLAGS.config_name+'+bench_'+FLAGS.imset)
else:
args = sna.get_args_for_config(FLAGS.type+'.'+FLAGS.config_name+'+bench_'+FLAGS.imset)
args.navtask.logdir = None
R = lambda: nav_env.get_multiplexer_class(args.navtask, 0)
R = R()
rng_data = [np.random.RandomState(0), np.random.RandomState(0)]
# Sample a room.
h_dists = []
gt_dists = []
for i in range(250):
e = R.sample_env(rng_data)
nodes = e.task.nodes
# Initialize the agent.
init_env_state = e.reset(rng_data)
gt_dist_to_goal = [e.episode.dist_to_goal[0][j][s]
for j, s in enumerate(e.episode.start_node_ids)]
for j in range(args.navtask.task_params.batch_size):
start_node_id = e.episode.start_node_ids[j]
end_node_id =e.episode.goal_node_ids[0][j]
h_dist = graph_utils.heuristic_fn_vec(
nodes[[start_node_id],:], nodes[[end_node_id], :],
n_ori=args.navtask.task_params.n_ori,
step_size=args.navtask.task_params.step_size)[0][0]
gt_dist = e.episode.dist_to_goal[0][j][start_node_id]
h_dists.append(h_dist)
gt_dists.append(gt_dist)
h_dists = np.array(h_dists)
gt_dists = np.array(gt_dists)
e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)])
input = e.get_common_data()
orig_maps = input['orig_maps'][0,0,:,:,0]
return h_dists, gt_dists, orig_maps
def plot_trajectory_first_person(dt, orig_maps, out_dir):
out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
FLAGS.imset)
fu.makedirs(out_dir)
# Load the model so that we can render.
plt.set_cmap('gray')
samples_per_action = 8; wait_at_action = 0;
Writer = animation.writers['mencoder']
writer = Writer(fps=3*(samples_per_action+wait_at_action),
metadata=dict(artist='anonymous'), bitrate=1800)
args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)
args.navtask.logdir = None
navtask_ = copy.deepcopy(args.navtask)
navtask_.camera_param.modalities = ['rgb']
navtask_.task_params.modalities = ['rgb']
sz = 512
navtask_.camera_param.height = sz
navtask_.camera_param.width = sz
navtask_.task_params.img_height = sz
navtask_.task_params.img_width = sz
R = lambda: nav_env.get_multiplexer_class(navtask_, 0)
R = R()
b = R.buildings[0]
f = [0 for _ in range(wait_at_action)] + \
[float(_)/samples_per_action for _ in range(samples_per_action)];
# Generate things for it to render.
inds_to_do = []
inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]
for i in inds_to_do:
fig = plt.figure(figsize=(10,8))
gs = GridSpec(3,4)
gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97, right=1.0, bottom=0.)
ax = fig.add_subplot(gs[:,:-1])
ax1 = fig.add_subplot(gs[0,-1])
ax2 = fig.add_subplot(gs[1,-1])
ax3 = fig.add_subplot(gs[2,-1])
axes = [ax, ax1, ax2, ax3]
# ax = fig.add_subplot(gs[:,:])
# axes = [ax]
for ax in axes:
ax.set_axis_off()
node_ids = dt['all_node_ids'][i, :, 0]*1
    # Prune trailing repeats so that the last node appears at most 3 times at the end.
if np.all(node_ids[-4:] == node_ids[-1]):
while node_ids[-4] == node_ids[-1]:
node_ids = node_ids[:-1]
num_steps = np.minimum(FLAGS.num_steps, len(node_ids))
xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids])
xyt_diff = xyt[1:,:] - xyt[:-1:,:]
xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4)
ind = np.where(xyt_diff[:,2] == 3)[0]
xyt_diff[ind, 2] = -1
xyt_diff = np.expand_dims(xyt_diff, axis=1)
to_cat = [xyt_diff*_ for _ in f]
perturbs_all = np.concatenate(to_cat, axis=1)
perturbs_all = np.concatenate([perturbs_all, np.zeros_like(perturbs_all[:,:,:1])], axis=2)
node_ids_all = np.expand_dims(node_ids, axis=1)*1
node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1)
node_ids_all = np.reshape(node_ids_all[:-1,:], -1)
perturbs_all = np.reshape(perturbs_all, [-1, 4])
imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)
# Get action at each node.
actions = []
_, action_to_nodes = b.get_feasible_actions(node_ids)
for j in range(num_steps-1):
action_to_node = action_to_nodes[j]
node_to_action = dict(zip(action_to_node.values(), action_to_node.keys()))
actions.append(node_to_action[node_ids[j+1]])
def init_fn():
return fig,
gt_dist_to_goal = []
# Render trajectories.
def worker(j):
# Plot the image.
step_number = j/(samples_per_action + wait_at_action)
img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();
img = img.astype(np.uint8); ax.imshow(img);
tt = ax.set_title(
"First Person View\n" +
"Top corners show diagnostics (distance, agents' action) not input to agent.",
fontsize=12)
plt.setp(tt, color='white')
# Distance to goal.
t = 'Dist to Goal:\n{:2d} steps'.format(int(dt['all_d_at_t'][i, step_number]))
t = ax.text(0.01, 0.99, t,
horizontalalignment='left',
verticalalignment='top',
fontsize=20, color='red',
transform=ax.transAxes, alpha=1.0)
t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))
# Action to take.
action_latex = ['$\odot$ ', '$\curvearrowright$ ', '$\curvearrowleft$ ', '$\Uparrow$ ']
t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
horizontalalignment='right',
verticalalignment='top',
fontsize=40, color='green',
transform=ax.transAxes, alpha=1.0)
t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))
# Plot the map top view.
ax = axes[-1]
if j == 0:
# Plot the map
locs = dt['all_locs'][i,:num_steps,:]
goal_loc = dt['all_goal_locs'][i,:,:]
xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24)
xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24)
ax.set_axis_on()
ax.patch.set_facecolor((0.333, 0.333, 0.333))
ax.set_xticks([]); ax.set_yticks([]);
ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0)
ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12)
locs = dt['all_locs'][i,:1,:]
ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12)
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
locs = dt['all_locs'][i,step_number,:]
locs = np.expand_dims(locs, axis=0)
ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4)
tt = ax.set_title('Trajectory in topview', fontsize=14)
plt.setp(tt, color='white')
return fig,
line_ani = animation.FuncAnimation(fig, worker,
(num_steps-1)*(wait_at_action+samples_per_action),
interval=500, blit=True, init_func=init_fn)
tmp_file_name = 'tmp.mp4'
line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'})
out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))
print out_file_name
if fu.exists(out_file_name):
gfile.Remove(out_file_name)
gfile.Copy(tmp_file_name, out_file_name)
gfile.Remove(tmp_file_name)
plt.close(fig)
def plot_trajectory(dt, hardness, orig_maps, out_dir):
out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
FLAGS.imset)
fu.makedirs(out_dir)
out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
dt['hardness'] = hardness
utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)
#Plot trajectories onto the maps
plt.set_cmap('gray')
for i in range(4000):
goal_loc = dt['all_goal_locs'][i, :, :]
locs = np.concatenate((dt['all_locs'][i,:,:],
dt['all_locs'][i,:,:]), axis=0)
xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24)
xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24)
fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6))
ax.set_axis_on()
ax.patch.set_facecolor((0.333, 0.333, 0.333))
ax.set_xticks([])
ax.set_yticks([])
all_locs = dt['all_locs'][i,:,:]*1
uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1
uniq = np.sort(uniq).tolist()
uniq.insert(0,0)
uniq = np.array(uniq)
all_locs = all_locs[uniq, :]
ax.plot(dt['all_locs'][i, 0, 0],
dt['all_locs'][i, 0, 1], 'b.', markersize=24)
ax.plot(dt['all_goal_locs'][i, 0, 0],
dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19)
ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
ax.scatter(all_locs[:,0], all_locs[:,1],
c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
cmap='Reds', s=30, linewidth=0)
ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
ax.set_xlim([xy1[0], xy2[0]])
ax.set_ylim([xy1[1], xy2[1]])
file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
print file_name
with fu.fopen(file_name, 'w') as f:
plt.savefig(f)
plt.close(fig)
def main(_):
a = _load_trajectory()
h_dists, gt_dists, orig_maps = _compute_hardness()
  hardness = 1. - h_dists * 1. / gt_dists
if FLAGS.top_view:
plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir)
if FLAGS.first_person:
plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir)
if __name__ == '__main__':
app.run()
| apache-2.0 |
uvNikita/image-proc-demo | app/main/views.py | 1 | 6528 | import os
import numpy as np
from scipy import ndimage
from scipy import fftpack as fp
from matplotlib import pyplot, cm
from PIL import Image
from flask import Blueprint, render_template, url_for, send_file
from flask import redirect, request, g, current_app, abort
from .util import get_image_path, get_no_image_path, clear_data_folder
from .util import get_image_url, check_image
from .filters import filter_image, FILTERS
from ..util_math import dft2, idft2, showfft, image_diff
from ..compression import compress_dft, compress_dct
main = Blueprint('main', __name__, template_folder='templates')
VALID_EXTENSIONS = {'png', 'jpg'}
IMAGE_TYPES = {
'origin', 'rec',
'fft', 'fft-real', 'fft-imag', 'fft-phase',
'filtered_low_pass', 'filtered_high_pass',
'filtered_band_pass', 'filtered_band_reject',
'compressed'
}
@main.route('/')
def index():
return redirect(url_for('main.upload'))
@main.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
        file = request.files['file']
        if file:
            image = Image.open(file)
_, ext = os.path.splitext(file.filename)
ext = ext[1:]
if ext in VALID_EXTENSIONS:
clear_data_folder()
response = current_app.make_response(redirect(url_for('.upload')))
g.current_image = ext
response.set_cookie('current_image', g.current_image)
image = image.convert('L')
image.save(get_image_path())
return response
return redirect(url_for('.upload'))
return render_template('main/upload.jinja', image_url=get_image_url())
@main.route('/image')
def get_image():
type = request.args['type']
if type not in IMAGE_TYPES or not g.get('current_image'):
image_path = get_no_image_path()
else:
image_path = get_image_path(type=type)
if not os.path.exists(image_path):
image_path = get_no_image_path()
return send_file(image_path, mimetype='image/gif')
@main.route('/fourier', methods=['GET', 'POST'])
@check_image
def fourier():
if request.method == 'POST':
image = ndimage.imread(get_image_path())
dft_res = fp.fftshift(dft2(image))
x_max = dft_res.shape[1]/2
y_max = dft_res.shape[0]/2
dims = [-x_max, x_max, -y_max, y_max]
pyplot.imshow(showfft(abs(dft_res)), cmap=cm.Greys_r, extent=dims)
pyplot.savefig(get_image_path(type='fft'))
pyplot.close()
pyplot.imshow(np.angle(dft_res), cmap=cm.Greys_r, extent=dims)
pyplot.savefig(get_image_path(type='fft-phase'))
pyplot.close()
pyplot.imshow(showfft(np.real(dft_res)), cmap=cm.Greys_r, extent=dims)
pyplot.savefig(get_image_path(type='fft-real'))
pyplot.close()
pyplot.imshow(showfft(np.imag(dft_res)), cmap=cm.Greys_r, extent=dims)
pyplot.savefig(get_image_path(type='fft-imag'))
pyplot.close()
return redirect(url_for('.fourier'))
return render_template('main/fourier.jinja')
@main.route('/inv-fourier', methods=['GET', 'POST'])
@check_image
def inv_fourier():
orig_im = ndimage.imread(get_image_path())
if os.path.exists(get_image_path(type='rec')):
rec_im = ndimage.imread(get_image_path(type='rec'))
diff = image_diff(orig_im, rec_im)
else:
diff = 0
if request.method == 'POST':
image = ndimage.imread(get_image_path())
rec_im = Image.fromarray(idft2(dft2(image)).astype(np.uint8))
rec_im.save(get_image_path(type='rec'))
return redirect(url_for('.inv_fourier'))
return render_template('main/inv_fourier.jinja', diff=diff)
@main.route('/low-pass', methods=['GET', 'POST'])
@check_image
def low_pass():
return process_filter('low_pass', ['cutoff'])
@main.route('/high-pass', methods=['GET', 'POST'])
@check_image
def high_pass():
return process_filter('high_pass', ['cutoff'])
@main.route('/band-pass', methods=['GET', 'POST'])
@check_image
def band_pass():
return process_filter('band_pass', ['cutoff', 'width'])
@main.route('/band-reject', methods=['GET', 'POST'])
@check_image
def band_reject():
return process_filter('band_reject', ['cutoff', 'width'])
@main.route('/compression', methods=['GET', 'POST'])
@check_image
def compression():
orig_im = ndimage.imread(get_image_path())
if os.path.exists(get_image_path(type='compressed')):
comp_im = ndimage.imread(get_image_path(type='compressed'))
diff = image_diff(orig_im, comp_im)
else:
diff = 0
if request.method == 'POST':
compression_method = request.form['compression_method']
compression_level = float(request.form['compression_level'])
if compression_method == 'dft':
compression_function = compress_dft
elif compression_method == 'dct':
compression_function = compress_dct
else:
            return abort(400)
image = ndimage.imread(get_image_path())
image_c = compression_function(image, compression_level)
Image.fromarray(image_c.astype(np.uint8)).save(get_image_path(type='compressed'))
return redirect(url_for('.compression', **request.form))
return render_template('main/compression.jinja', diff=diff)
def process_filter(filter_type, options):
filtered_image_path = get_image_path(type='filtered_{}'.format(filter_type))
orig_im = ndimage.imread(get_image_path())
if os.path.exists(filtered_image_path):
comp_im = ndimage.imread(filtered_image_path)
diff = image_diff(orig_im, comp_im)
else:
diff = 0
if request.method == 'POST':
filter_name = request.form['filter_name']
option_values = {
option: float(request.form[option])
for option in options
}
if filter_name == 'butterworth':
option_values['order'] = int(request.form['order'])
image = ndimage.imread(get_image_path())
filter_func = FILTERS[filter_type][filter_name]
image_filtered = filter_image(image, filter_func, option_values)
im = Image.fromarray(image_filtered.astype(np.uint8))
im.save(filtered_image_path)
return redirect(url_for('.{}'.format(filter_type), **request.form))
return render_template('main/filter.jinja', filter_type=filter_type, options=options, diff=diff)
@main.route('/about')
def about():
return render_template('main/about.jinja')
| mit |
tntnatbry/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
  # the estimator to control session configurations, e.g. num_cores
  # and gpu_memory_fraction.
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Titan-C/scikit-learn | sklearn/decomposition/base.py | 5 | 5613 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
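        Examples
        --------
        A minimal sketch using a derived class (shapes only; the values depend
        on the fitted model):
        >>> from sklearn.decomposition import PCA
        >>> X = [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]
        >>> pca = PCA(n_components=1).fit(X)
        >>> pca.get_covariance().shape  # doctest: +SKIP
        (2, 2)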
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
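        # With W = components_ (scaled if whitened), v = exp_var_diff and
        # sigma2 = noise_variance_, the covariance is W.T * diag(v) * W + sigma2 * I,
        # and the Woodbury identity gives
        #   inv(cov) = I / sigma2
        #              - W.T * inv(diag(1 / v) + W * W.T / sigma2) * W / sigma2 ** 2
        # which the lines below evaluate term by term.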
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return np.dot(X, self.components_) + self.mean_
| bsd-3-clause |
DataReplyUK/datareplyuk | GenesAssociation/__init__.py | 1 | 6038 | """
Program: RUNNING GRAPH ANALYTICS WITH SPARK GRAPH-FRAMES:
Author: Dr. C. Hadjinikolis
Date: 14/09/2016
Description: This is the application's core module from where everything is executed.
The module is responsible for:
1. Loading Spark
2. Loading GraphFrames
3. Running analytics by leveraging other modules in the package.
"""
# IMPORT OTHER LIBS -------------------------------------------------------------------------------#
import os
import sys
import pandas as pd
# IMPORT SPARK ------------------------------------------------------------------------------------#
# Path to Spark source folder
USER_FILE_PATH = "/Users/christoshadjinikolis"
SPARK_PATH = "/PycharmProjects/GenesAssociation"
SPARK_FILE = "/spark-2.0.0-bin-hadoop2.7"
SPARK_HOME = USER_FILE_PATH + SPARK_PATH + SPARK_FILE
os.environ['SPARK_HOME'] = SPARK_HOME
# Append pySpark to Python Path
sys.path.append(SPARK_HOME + "/python")
sys.path.append(SPARK_HOME + "/python" + "/lib/py4j-0.10.1-src.zip")
try:
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext
from pyspark.graphframes import graphframe as GF
except ImportError as ex:
print "Can not import Spark Modules", ex
sys.exit(1)
# GLOBAL VARIABLES --------------------------------------------------------------------------------#
# Configure spark properties
CONF = (SparkConf()
.setMaster("local")
.setAppName("My app")
.set("spark.executor.memory", "10g")
.set("spark.executor.instances", "4"))
# Instantiate SparkContext object
SC = SparkContext(conf=CONF)
# Instantiate SQL_SparkContext object
SQL_CONTEXT = SQLContext(SC)
# MAIN CODE ---------------------------------------------------------------------------------------#
if __name__ == "__main__":
# Main Path to CSV files
DATA_PATH = '/PycharmProjects/GenesAssociation/data/'
FILE_NAME = 'gene_gene_associations_50k.csv'
# LOAD DATA CSV USING PANDAS -----------------------------------------------------------------#
print "STEP 1: Loading Gene Nodes -------------------------------------------------------------"
# Read csv file and load as df
GENES = pd.read_csv(USER_FILE_PATH + DATA_PATH + FILE_NAME,
usecols=['OFFICIAL_SYMBOL_A'],
low_memory=True,
iterator=True,
chunksize=1000)
# Concatenate chunks into list & convert to dataFrame
GENES_DF = pd.DataFrame(pd.concat(list(GENES), ignore_index=True))
# Remove duplicates
GENES_DF_CLEAN = GENES_DF.drop_duplicates(keep='first')
# Name Columns
GENES_DF_CLEAN.columns = ['id']
# Output dataFrame
print GENES_DF_CLEAN
# Create vertices
VERTICES = SQL_CONTEXT.createDataFrame(GENES_DF_CLEAN)
# Show some vertices
print VERTICES.take(5)
print "STEP 2: Loading Gene Edges -------------------------------------------------------------"
# Read csv file and load as df
EDGES = pd.read_csv(USER_FILE_PATH + DATA_PATH + FILE_NAME,
usecols=['OFFICIAL_SYMBOL_A', 'OFFICIAL_SYMBOL_B', 'EXPERIMENTAL_SYSTEM'],
low_memory=True,
iterator=True,
chunksize=1000)
# Concatenate chunks into list & convert to dataFrame
EDGES_DF = pd.DataFrame(pd.concat(list(EDGES), ignore_index=True))
# Name Columns
EDGES_DF.columns = ["src", "dst", "rel_type"]
# Output dataFrame
print EDGES_DF
# Create vertices
EDGES = SQL_CONTEXT.createDataFrame(EDGES_DF)
# Show some edges
print EDGES.take(5)
print "STEP 3: Generating the Graph -----------------------------------------------------------"
GENES_GRAPH = GF.GraphFrame(VERTICES, EDGES)
print "STEP 4: Running Various Basic Analytics ------------------------------------------------"
print "Vertex in-Degree -----------------------------------------------------------------------"
GENES_GRAPH.inDegrees.sort('inDegree', ascending=False).show()
print "Vertex out-Degree ----------------------------------------------------------------------"
GENES_GRAPH.outDegrees.sort('outDegree', ascending=False).show()
print "Vertex degree --------------------------------------------------------------------------"
GENES_GRAPH.degrees.sort('degree', ascending=False).show()
print "Triangle Count -------------------------------------------------------------------------"
RESULTS = GENES_GRAPH.triangleCount()
RESULTS.select("id", "count").show()
print "Label Propagation ----------------------------------------------------------------------"
GENES_GRAPH.labelPropagation(maxIter=10).show() # Convergence is not guaranteed
print "PageRank -------------------------------------------------------------------------------"
GENES_GRAPH.pageRank(resetProbability=0.15, tol=0.01)\
.vertices.sort('pagerank', ascending=False).show()
print "STEP 5: Find Shortest Paths w.r.t. Landmarks -------------------------------------------"
# Shortest paths
SHORTEST_PATH = GENES_GRAPH.shortestPaths(landmarks=["ARF3", "MAP2K4"])
SHORTEST_PATH.select("id", "distances").show()
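    # Editorial aside (not in the original script): GraphFrames also supports
    # motif queries on the same graph; a hedged, untested sketch of a
    # reciprocal-association search would be
    #     GENES_GRAPH.find("(a)-[e]->(b); (b)-[e2]->(a)").show()
    # which returns pairs of genes that point at each other.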
print "STEP 6: Save Vertices and Edges --------------------------------------------------------"
# Save vertices and edges as Parquet to some location.
# Note: You can't overwrite existing vertices and edges directories.
GENES_GRAPH.vertices.write.parquet("vertices")
GENES_GRAPH.edges.write.parquet("edges")
print "STEP 7: Load "
# Load the vertices and edges back.
SAME_VERTICES = GENES_GRAPH.read.parquet("vertices")
SAME_EDGES = GENES_GRAPH.read.parquet("edges")
# Create an identical GraphFrame.
SAME_GENES_GRAPH = GF.GraphFrame(SAME_VERTICES, SAME_EDGES)
# END OF FILE -------------------------------------------------------------------------------------#
| apache-2.0 |
reiinakano/ensemble_helper | src/parameterspinner.py | 1 | 2420 | # This module contains the class for a parameter spinner, containing different methods to automatically generate valid
# hyperparameters from a hyperparameter information dictionary, i.e. the "hyperparam" dictionary in a hyperparam.py file.
from sklearn.grid_search import ParameterGrid
from collections import Mapping
class ParameterSpinner:
def __init__(self, hyperdict):
self.hyperdict = hyperdict
    # Returns the list of hyperparameter names in the dictionary
@staticmethod
def get_parameter_list(hyperdict):
return [key for key in hyperdict]
# Returns valid hyperparameters using only the default values of the dictionary
@staticmethod
def use_default_values(hyperdict):
return {key: value["default"] for key, value in hyperdict.iteritems()}
# This method takes the hyperdict AND another dictionary argument_dict.
    # argument_dict is a dictionary containing key-value pairs in the form hyperparameter: hyperparameter_value
# The method returns a valid hyperparameter dictionary.
@staticmethod
def change_default(hyperdict, argument_dict):
default = ParameterSpinner.use_default_values(hyperdict)
for key, value in argument_dict.iteritems():
if key not in default:
raise KeyError("Parameter '{}' not in dictionary.".format(key))
default[key] = value
return default
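    # Illustrative note (added): for example,
    #     ParameterSpinner.change_default(hyperdict, {"C": 10})
    # returns the full default dictionary with only "C" overridden, and raises
    # KeyError for any parameter name the dictionary does not contain.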
    # This method performs an exhaustive grid search, analogous to sklearn's GridSearchCV but built on ParameterGrid.
    # "param_grid" is a list of dictionaries representing various grids to be generated.
# The function returns an iterator that yields a valid hyperparam dictionary per iteration.
@staticmethod
def exhaustive_search_iterator(hyperdict, param_grid):
if isinstance(param_grid, Mapping):
param_grid = [param_grid]
for argument_dict in ParameterGrid(param_grid):
yield ParameterSpinner.change_default(hyperdict, argument_dict)
if __name__ == "__main__":
import modulemanager
m = modulemanager.ModuleManager()
hyperdictt = m.get_model_hyperparams("Logistic Regression")
print "default: ", ParameterSpinner.use_default_values(hyperdictt)
param_grid = [{"C": [2, 3], "n_jobs": [2, 1, 3, -1]},
{"C": [2, 4], "penalty": ["l1", "l2"]}]
for my_dict in ParameterSpinner.exhaustive_search_iterator(hyperdictt, param_grid):
print my_dict
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Dynamic_Shear_Behaviour/Total_Energy_Verification/dt_1e-4/Plot_Results.py | 2 | 2935 | #!/usr/bin/env python
import sys
import math
import h5py
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from matplotlib import pylab
from matplotlib.font_manager import FontProperties
plt.rcParams.update({'font.size': 24})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 4
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['xtick.minor.width'] = 10
plt.rcParams['xtick.labelsize']=30
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 4
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['ytick.minor.width'] = 10
plt.rcParams['ytick.labelsize']=30
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
ax = plt.subplot(111)
ax.grid()
ax.set_xlabel("Time [s] ",fontsize=30)
ax.set_ylabel(r"Energy [J] ",fontsize=30)
# #########################################################################
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][3,:]
acc = finput["/Model/Nodes/Generalized_Accelerations"][3,:]
n = len(acc);
dt = times[1] - times[0];
vel = acc.copy()  # copy so the raw accelerations are not overwritten during integration
vel[0] = 0
for i in xrange(1,n):
vel[i]=vel[i-1]+(acc[i])*dt;
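# Note added for clarity: the loop above is a rectangle-rule integration of the
# accelerations with zero initial velocity; a vectorized equivalent (same
# uniform time step dt) would be
#     vel = np.concatenate(([0.0], np.cumsum(acc[1:]) * dt))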
E_k = 0.5*vel*vel;
E_s = 0.5*100*disp*disp;
# kinetic energy
ax.plot(times,E_k,'b',linewidth=4,label=r'$E_{kinetic}$');
ax.hold(True);
# potential energy
ax.plot(times,E_s,'k',linewidth=4,label=r'$E_{potential}$');
ax.hold(True);
# Contact Energy
# #########################################################################
# Read the time and displacement
times = finput["time"][:]
Fx = finput["/Model/Elements/Element_Outputs"][5,:]
Deltax = finput["/Model/Elements/Element_Outputs"][8,:]
E_inc_d = Fx*Deltax;
E_inc_d[0] =0;
Deltax[0]=0;
E_d = Fx*Deltax;
E_d[0] =0;
for i in xrange(1,n):
E_d[i]=E_d[i-1]+ E_d[i];
ax.plot(times,E_d,'r',linewidth=4,label=r'$E_{dis}$');
ax.hold(True);
# Total Energy
# #########################################################################
E_total = E_k +E_s + E_d;
ax.plot(times,E_total,'g',linewidth=4,label=r'$E_{total}$');
# ax.hold(True);
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.155),
ncol=4, fancybox=True, shadow=True)
# plt.show()
pylab.savefig("Total_Energy_Plot_With_Components.pdf", bbox_inches='tight')
ax.hold(False);
fig = plt.figure();
ax = plt.subplot(111)
ax.plot(times,E_inc_d*1000,'r',linewidth=4,label=r'$E^{incr}_{dis}$');
# ax.plot(times,E_d,'b',linewidth=4,label=r'$E_{dis}$');
ax.grid()
ax.set_xlabel("Time [s] ")
ax.set_ylabel(r"Energy [mJ] ")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.24),
ncol=4, fancybox=True, shadow=True)
pylab.savefig("Incremental_Dissipated_Energy.pdf", bbox_inches='tight')
# plt.show()
| cc0-1.0 |
matthew-tucker/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 19 | 3767 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
DGrady/pandas | pandas/tests/indexes/datetimes/test_missing.py | 15 | 2132 | import pandas as pd
import pandas.util.testing as tm
class TestDatetimeIndex(object):
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
tm.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
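            # (added note) the fill value below is tz-aware while idx is tz-naive,
            # so it cannot be coerced into the DatetimeIndex and the result falls
            # back to an object-dtype Index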
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
tm.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], tz=tz)
tm.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
tm.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
| bsd-3-clause |
blackball/an-test6 | util/sip_plot_distortion.py | 1 | 2423 | import matplotlib
matplotlib.use('Agg')
import sys
from optparse import *
import numpy as np
from pylab import *
from numpy import *
#from astrometry.util.sip import *
from astrometry.util.util import *
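# Editorial description (added): plot_distortions draws the SIP distortion
# field of the given WCS by tracing a grid of image lines in black and
# overlaying their distorted positions in red, exaggerated by the factor `ex`;
# the reference pixel (CRPIX) is marked with a red cross.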
def plot_distortions(wcsfn, ex=1, ngridx=10, ngridy=10, stepx=10, stepy=10):
wcs = Sip(wcsfn)
W,H = wcs.wcstan.imagew, wcs.wcstan.imageh
xgrid = np.linspace(0, W, ngridx)
ygrid = np.linspace(0, H, ngridy)
X = np.linspace(0, W, int(ceil(W/stepx)))
Y = np.linspace(0, H, int(ceil(H/stepy)))
xlo,xhi,ylo,yhi = 0,W,0,H
for x in xgrid:
DX,DY = [],[]
xx,yy = [],[]
for y in Y:
dx,dy = wcs.get_distortion(x, y)
xx.append(x)
yy.append(y)
DX.append(dx)
DY.append(dy)
DX = array(DX)
DY = array(DY)
xx = array(xx)
yy = array(yy)
EX = DX + ex * (DX - xx)
EY = DY + ex * (DY - yy)
plot(xx, yy, 'k-', alpha=0.5)
plot(EX, EY, 'r-')
xlo = min(xlo, min(EX))
xhi = max(xhi, max(EX))
ylo = min(ylo, min(EY))
yhi = max(yhi, max(EY))
for y in ygrid:
DX,DY = [],[]
xx,yy = [],[]
for x in X:
dx,dy = wcs.get_distortion(x, y)
DX.append(dx)
DY.append(dy)
xx.append(x)
yy.append(y)
DX = array(DX)
DY = array(DY)
xx = array(xx)
yy = array(yy)
EX = DX + ex * (DX - xx)
EY = DY + ex * (DY - yy)
plot(xx, yy, 'k-', alpha=0.5)
plot(EX, EY, 'r-')
xlo = min(xlo, min(EX))
xhi = max(xhi, max(EX))
ylo = min(ylo, min(EY))
yhi = max(yhi, max(EY))
plot([wcs.wcstan.crpix[0]], [wcs.wcstan.crpix[1]], 'rx')
#axis([0, W, 0, H])
axis('scaled')
axis([xlo,xhi,ylo,yhi])
#axis('tight')
if __name__ == '__main__':
parser = OptionParser(usage='%prog [options] <wcs-filename> <plot-filename>')
parser.add_option('-e', '--ex', '--exaggerate', dest='ex', type='float', help='Exaggerate the distortion by this factor')
#parser.add_option('-s', '--scale', dest='scale', type='float', help='Scale the
parser.add_option('-n', dest='nsteps', type='int', help='Number of grid lines to plot')
parser.set_defaults(ex=1.)
opt,args = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(-1)
wcsfn = args[0]
outfn = args[1]
args = {}
if opt.ex is not None:
args['ex'] = opt.ex
if opt.nsteps is not None:
args['ngridx'] = opt.nsteps
args['ngridy'] = opt.nsteps
clf()
plot_distortions(wcsfn, **args)
tt = 'SIP distortions: %s' % wcsfn
if opt.ex != 1:
tt += ' (exaggerated by %g)' % opt.ex
title(tt)
savefig(outfn)
| gpl-2.0 |
Nyker510/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
robertwb/incubator-beam | sdks/python/apache_beam/dataframe/convert.py | 6 | 9567 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import weakref
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Union
import pandas as pd
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import schemas
from apache_beam.dataframe import transforms
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Optional
# TODO: Or should this be called as_dataframe?
def to_dataframe(
pcoll, # type: pvalue.PCollection
proxy=None, # type: Optional[pd.core.generic.NDFrame]
label=None, # type: Optional[str]
):
# type: (...) -> frame_base.DeferredFrame
"""Converts a PCollection to a deferred dataframe-like object, which can
  be manipulated with pandas methods like `filter` and `groupby`.
For example, one might write::
pcoll = ...
df = to_dataframe(pcoll, proxy=...)
result = df.groupby('col').sum()
pcoll_result = to_pcollection(result)
A proxy object must be given if the schema for the PCollection is not known.
"""
if proxy is None:
if pcoll.element_type is None:
raise ValueError(
"Cannot infer a proxy because the input PCollection does not have a "
"schema defined. Please make sure a schema type is specified for "
"the input PCollection, or provide a proxy.")
# If no proxy is given, assume this is an element-wise schema-aware
# PCollection that needs to be batched.
if label is None:
# Attempt to come up with a reasonable, stable label by retrieving
# the name of these variables in the calling context.
label = 'BatchElements(%s)' % _var_name(pcoll, 2)
proxy = schemas.generate_proxy(pcoll.element_type)
pcoll = pcoll | label >> schemas.BatchRowsAsDataFrame(proxy=proxy)
return frame_base.DeferredFrame.wrap(
expressions.PlaceholderExpression(proxy, pcoll))
# PCollections generated by to_pcollection are memoized, keyed by expression id.
# WeakValueDictionary is used so the caches are cleaned up with the parent
# pipelines
# Note that the pipeline (indirectly) holds references to the transforms which
# keeps both the PCollections and expressions alive. This ensures the
# expression's ids are never accidentally re-used.
TO_PCOLLECTION_CACHE = weakref.WeakValueDictionary(
) # type: weakref.WeakValueDictionary[str, pvalue.PCollection]
UNBATCHED_CACHE = weakref.WeakValueDictionary(
) # type: weakref.WeakValueDictionary[str, pvalue.PCollection]
def _make_unbatched_pcoll(
pc: pvalue.PCollection, expr: expressions.Expression,
include_indexes: bool):
label = f"Unbatch '{expr._id}'"
if include_indexes:
label += " with indexes"
if label not in UNBATCHED_CACHE:
UNBATCHED_CACHE[label] = pc | label >> schemas.UnbatchPandas(
expr.proxy(), include_indexes=include_indexes)
# Note unbatched cache is keyed by the expression id as well as parameters
# for the unbatching (i.e. include_indexes)
return UNBATCHED_CACHE[label]
# TODO: Or should this be called from_dataframe?
def to_pcollection(
*dataframes, # type: Union[frame_base.DeferredFrame, pd.DataFrame, pd.Series]
label=None,
always_return_tuple=False,
yield_elements='schemas',
include_indexes=False,
pipeline=None) -> Union[pvalue.PCollection, Tuple[pvalue.PCollection, ...]]:
"""Converts one or more deferred dataframe-like objects back to a PCollection.
This method creates and applies the actual Beam operations that compute
the given deferred dataframes, returning a PCollection of their results. By
default the resulting PCollections are schema-aware PCollections where each
element is one row from the output dataframes, excluding indexes. This
behavior can be modified with the `yield_elements` and `include_indexes`
arguments.
Also accepts non-deferred pandas dataframes, which are converted to deferred,
schema'd PCollections. In this case the contents of the entire dataframe are
serialized into the graph, so for large amounts of data it is preferable to
write them to disk and read them with one of the read methods.
If more than one (related) result is desired, it can be more efficient to
pass them all at the same time to this method.
Args:
label: (optional, default "ToPCollection(...)"") the label to use for the
conversion transform.
always_return_tuple: (optional, default: False) If true, always return
a tuple of PCollections, even if there's only one output.
yield_elements: (optional, default: "schemas") If set to "pandas", return
PCollections containing the raw Pandas objects (DataFrames or Series),
if set to "schemas", return an element-wise PCollection, where DataFrame
and Series instances are expanded to one element per row. DataFrames are
converted to schema-aware PCollections, where column values can be
accessed by attribute.
include_indexes: (optional, default: False) When yield_elements="schemas",
if include_indexes=True, attempt to include index columns in the output
schema for expanded DataFrames. Raises an error if any of the index
levels are unnamed (name=None), or if any of the names are not unique
among all column and index names.
pipeline: (optional, unless non-deferred dataframes are passed) Used when
creating a PCollection from a non-deferred dataframe.
"""
if not yield_elements in ("pandas", "schemas"):
raise ValueError(
"Invalid value for yield_elements argument, '%s'. "
"Allowed values are 'pandas' and 'schemas'" % yield_elements)
if label is None:
# Attempt to come up with a reasonable, stable label by retrieving the name
# of these variables in the calling context.
label = 'ToPCollection(%s)' % ', '.join(_var_name(e, 3) for e in dataframes)
# Support for non-deferred dataframes.
deferred_dataframes = []
for ix, df in enumerate(dataframes):
if isinstance(df, frame_base.DeferredBase):
# TODO(robertwb): Maybe extract pipeline object?
deferred_dataframes.append(df)
elif isinstance(df, (pd.Series, pd.DataFrame)):
if pipeline is None:
raise ValueError(
'Pipeline keyword required for non-deferred dataframe conversion.')
deferred = pipeline | '%s_Defer%s' % (label, ix) >> beam.Create([df])
deferred_dataframes.append(
frame_base.DeferredFrame.wrap(
expressions.PlaceholderExpression(df.iloc[:0], deferred)))
else:
raise TypeError(
'Unable to convert objects of type %s to a PCollection' % type(df))
dataframes = tuple(deferred_dataframes)
def extract_input(placeholder):
if not isinstance(placeholder._reference, pvalue.PCollection):
raise TypeError(
'Expression roots must have been created with to_dataframe.')
return placeholder._reference
placeholders = frozenset.union(
frozenset(), *[df._expr.placeholders() for df in dataframes])
# Exclude any dataframes that have already been converted to PCollections.
# We only want to convert each DF expression once, then re-use.
new_dataframes = [
df for df in dataframes if df._expr._id not in TO_PCOLLECTION_CACHE
]
if len(new_dataframes):
new_results = {p: extract_input(p)
for p in placeholders
} | label >> transforms._DataframeExpressionsTransform({
ix: df._expr
for (ix, df) in enumerate(new_dataframes)
}) # type: Dict[Any, pvalue.PCollection]
TO_PCOLLECTION_CACHE.update(
{new_dataframes[ix]._expr._id: pc
for ix, pc in new_results.items()})
raw_results = {
ix: TO_PCOLLECTION_CACHE[df._expr._id]
for ix,
df in enumerate(dataframes)
}
if yield_elements == "schemas":
def maybe_unbatch(pc, value):
if isinstance(value, frame_base._DeferredScalar):
return pc
else:
return _make_unbatched_pcoll(pc, value._expr, include_indexes)
results = {
ix: maybe_unbatch(pc, dataframes[ix])
for (ix, pc) in raw_results.items()
}
else:
results = raw_results
if len(results) == 1 and not always_return_tuple:
return results[0]
else:
return tuple(value for key, value in sorted(results.items()))
def _var_name(obj, level):
frame = inspect.currentframe()
for _ in range(level):
if frame is None:
return '...'
frame = frame.f_back
for key, value in frame.f_locals.items():
if obj is value:
return key
for key, value in frame.f_globals.items():
if obj is value:
return key
return '...'
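# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the module). A hedged example of the
# round trip documented above, assuming a schema'd PCollection built from
# `beam.Row` elements inside an active pipeline:
#     with beam.Pipeline() as p:
#         rows = p | beam.Create([beam.Row(word='a', n=1), beam.Row(word='b', n=2)])
#         df = to_dataframe(rows)
#         totals = df.groupby('word').sum()
#         _ = to_pcollection(totals, include_indexes=True) | beam.Map(print)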
| apache-2.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
  xcorr - plot the cross correlation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
  detrend - remove the mean or best fit line from an array
  diag - the k-th diagonal of matrix
  diff - the n-th difference of an array
  eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
  rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
  cohere - the coherence using average periodogram
  csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
  psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
clsb/miles | miles/plot.py | 1 | 12891 | """Module for plotting routines.
"""
__all__ = ['plot']
import os
import sys
from typing import Optional, Tuple
import scipy.ndimage
import scipy.linalg
import scipy.spatial
matplotlib = None # type: Optional[module]
plt = None # type: Optional[module]
sns = None # type: Optional[module]
import numpy as np
from miles import Milestones, Simulation, load_distributions # noqa: E501
EPSILON = sys.float_info.epsilon
TICKS = [-180, -90, 0, 90, 180]
def latex_preamble() -> None:
"""LaTeX preamble for publication-quality figures.
"""
fig_width_pt = 397.48499
inches_per_pt = 1.0/72.27
golden_mean = (np.sqrt(5)-1.0) / 2.0
fig_width = 1.4 * fig_width_pt * inches_per_pt
fig_height = fig_width * 1.25 * golden_mean
fig_size = (fig_width, fig_height)
params = {
'backend': 'pdf',
'pgf.texsystem': 'pdflatex',
'font.family': 'serif',
'font.serif': 'cm',
'font.size': 12,
'axes.labelsize': 12,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'text.usetex': True,
'text.latex.unicode': True,
'figure.figsize': fig_size
}
matplotlib.rcParams.update(params)
plt.axes([0.2, 0.2, 1.0 - 0.2, 1.0 - 0.2])
plt.figure(figsize=fig_size)
def set_xticks() -> None:
plt.axes().set_xticks(TICKS)
plt.axes().set_xlim([TICKS[0], TICKS[-1]])
def set_yticks() -> None:
plt.axes().set_yticks(TICKS)
plt.axes().set_ylim([TICKS[0], TICKS[-1]])
def pbc(points: np.array) -> np.array:
"""Apply periodic boundary conditions to a set of points.
"""
return np.concatenate((points,
points + np.array([0, -360]),
points + np.array([0, 360]),
points + np.array([-360, -360]),
points + np.array([-360, 0]),
points + np.array([-360, +360]),
points + np.array([360, -360]),
points + np.array([360, 0]),
points + np.array([360, 360])))
def plot_voronoi(vor: scipy.spatial.Voronoi, milestones: Milestones,
data: np.array, ax: 'matplotlib.axes._axes.Axes', **kwargs):
"""Plot 2D Voronoi diagram.
Parameters
----------
vor : scipy.spatial.Voronoi
Voronoi tessellation to plot.
milestones : Milestones
Milestone factory.
data : np.array
Dataset to plot onto the milestones.
ax : matplotlib.axes._axes.Axes
Axes where the figure will be plotted.
Returns
-------
fig : matplotlib.figure.Figure
Matplotlib figure.
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if data is None:
data = np.ones(milestones.max_milestones)
plot_colorbar = False
else:
plot_colorbar = True
import matplotlib.cm as cm
cmap = cm.get_cmap(kwargs['colors'])
valid_elements = np.logical_not(np.isnan(data))
minimum = np.min(data[valid_elements])
maximum = np.max(data[valid_elements])
norm = cm.colors.Normalize(vmin=minimum, vmax=maximum)
alpha = 0.9
sz = kwargs['marker_size']
labels = kwargs['labels']
num_anchors = len(milestones.anchors)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
try:
m = milestones.make_from_indices(pointidx[0] % num_anchors,
pointidx[1] % num_anchors)
except IndexError:
continue
if labels:
point = vor.vertices[simplex, :].mean(axis=0)
ax.text(point[0], point[1], [a.index for a in m.anchors],
clip_on=True, size=sz, zorder=10)
datum = data[m.index]
if not np.isnan(datum):
c = cmap(norm(datum))
ax.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1],
color=c, alpha=alpha, linewidth=sz, zorder=-10+datum)
length_far_point = 1e6 # XXX hard-coded
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + length_far_point * direction
try:
m = milestones.make_from_indices(pointidx[0] % num_anchors,
pointidx[1] % num_anchors)
except IndexError:
continue
if labels:
point = vor.vertices[i] + direction * 0.5 # XXX hardcoded
ax.text(point[0], point[1], [a.index for a in m.anchors],
clip_on=True, size=sz, zorder=10)
datum = data[m.index]
if not np.isnan(datum):
c = cmap(norm(datum))
ax.plot([vor.vertices[i, 0], far_point[0]],
[vor.vertices[i, 1], far_point[1]], color=c,
alpha=alpha, linewidth=sz, zorder=-10+datum)
if plot_colorbar:
ax1, _ = matplotlib.colorbar.make_axes(ax)
cb = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm)
if kwargs['colorbar_title']:
cb.set_label(kwargs['colorbar_title'])
text = cb.ax.yaxis.label
font = matplotlib.font_manager.FontProperties(style='normal')
text.set_font_properties(font)
return ax.figure
def plot_free_energy():
# from matplotlib.colors import LogNorm
ala3 = np.load('ala3.npy')
d = ala3[:, [1, -1]]
num_bins = 50
zz, xx, yy = scipy.histogram2d(d[:, 0], d[:, 1], bins=num_bins,
normed=True)
x, y = np.meshgrid(xx[:-1], yy[:-1])
z = scipy.ndimage.gaussian_filter(zz, sigma=1.0, order=0)
plt.contour(y, x, z, colors='k', zorder=-900)
plt.imshow(z.T, extent=(-180, 180, -180, 180), origin='lower',
cmap='Greys', zorder=-1000)
# cmap='YlGnBu', zorder=-1000)
# plt.colorbar(label='Density')
def plot_milestones(simulation: Simulation, **kwargs) -> None:
"""Plot a dataset on the faces of a Voronoi tessellation.
"""
milestones = simulation.milestones
anchors = milestones.anchors
assert len(anchors) >= 2
if kwargs['input']:
input_files = kwargs['input']
input_file_name = input_files[0]
try:
data = np.load(input_file_name)
except OSError:
data = np.loadtxt(input_file_name)
else:
data = None
sns.set(style='ticks')
if kwargs['output']:
latex_preamble()
plot_free_energy()
coordinates = np.array([a.coordinates for a in anchors])
voronoi = scipy.spatial.Voronoi(pbc(coordinates)) # XXX PBC hardcoded
# voronoi = scipy.spatial.Voronoi(coordinates, qhull_options='Qp Pp')
plot_voronoi(voronoi, milestones, data, ax=plt.axes(), **kwargs)
set_xticks()
set_yticks()
plt.axes().set_aspect('equal')
if kwargs['title']:
plt.title(kwargs['title'])
if kwargs['xlabel']:
plt.xlabel(kwargs['xlabel'])
if kwargs['ylabel']:
plt.ylabel(kwargs['ylabel'])
if kwargs['output']:
plt.savefig(kwargs['output'])
else:
plt.show()
def orthogonal_distance_regression(data: np.array) \
-> Tuple[np.array, np.array]:
"""Fit data to best hyperplane (best in the least-squares sense).
Parameters
----------
data : np.array
Points to fit.
Returns
-------
projected_points : np.array
The points projected onto the best hyperplane.
coordinates : np.array
The (n-1) coordinates of the points on the best hyperplane.
"""
d = np.array(data, dtype=data.dtype)
center_of_mass = np.mean(d, axis=0)
d -= center_of_mass
U, D, V = scipy.linalg.svd(d.T, full_matrices=False)
# Subspace spanned by the best hyperplane.
span = U[:, 0:-1]
# Projection of the data points onto the best hyperplane.
projection = np.matrix(center_of_mass) + d @ span @ span.T
# Coordinates of the data points in the hyperplane. Note that
# since U is an orthogonal matrix, its inverse coincides with its
# transpose.
d += center_of_mass
coordinates = -U.T @ d.T
return np.array(projection), coordinates[0:-1, :].T
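# Editorial note (added): a quick, hedged sanity check of the fit, assuming a
# 3-D input array `data`, would be
#     projected, coords = orthogonal_distance_regression(data)
#     assert projected.shape == data.shape and coords.shape == (len(data), 2)
# i.e. every point is projected in place and gets two in-plane coordinates.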
def sample_data(distribution, key, num_samples=1000): # XXX Hardcoded.
"""Draw samples from a given distribution.
"""
data = []
for _ in range(num_samples):
transition = distribution.sample()
data.append(key(transition))
return np.array(data)
def get_rows_and_columns(num_milestones: int) -> Tuple[int, int]:
"""Get optimal number of rows and columns to display histograms.
Parameters
----------
num_milestones : int
Number of milestones
Returns
-------
rows : int
Optimal number of rows.
cols : int
Optimal number of columns.
"""
if num_milestones <= 10:
layouts = {
1: (1, 1), 2: (1, 2), 3: (1, 3), 4: (2, 2), 5: (2, 3),
            6: (2, 3), 7: (2, 4), 8: (2, 4), 9: (3, 3), 10: (2, 5)
}
rows, cols = layouts[num_milestones]
else:
rows = int(np.ceil(np.sqrt(num_milestones)))
cols = rows
return rows, cols
def plot_histograms(simulation: Simulation, **kwargs) -> None:
"""Plot histograms on milestones.
"""
sns.set(style='ticks')
if kwargs['output'] is not None:
latex_preamble()
all_distributions = []
known_milestones = set()
file_names = kwargs['input']
num_bins = kwargs['num_bins']
min_value, max_value = kwargs['min_value'], kwargs['max_value']
for file_name in file_names:
dists = load_distributions(file_name)
all_distributions.append(dists)
known_milestones = known_milestones.union({m for m in dists.keys()})
plot_kde = len(file_names) > 1
if num_bins:
pts = np.linspace(-180, 180, num_bins) # XXX hardcoded.
bins = pts[:-1]
plot_hist = True
else:
bins = None
plot_hist = not plot_kde
rows, cols = get_rows_and_columns(len(known_milestones))
for idx, milestone in enumerate(sorted(known_milestones)):
for file_name, dists in zip(file_names, all_distributions):
try:
distribution = dists[milestone]
except KeyError:
continue
data = sample_data(distribution,
key=lambda x: x.colvars)
_, x = orthogonal_distance_regression(data)
# x = data[:, 1] # XXX BUG
plt.subplot(rows, cols, idx + 1)
_, name = os.path.split(file_name)
sns.distplot(x, hist=plot_hist, norm_hist=True,
kde=plot_kde, bins=bins, label=name)
sns.despine()
plt.xlabel('Position at milestone')
plt.xlim([TICKS[0], TICKS[-1]])
plt.xticks(TICKS)
plt.ylabel('Density')
if min_value is not None and max_value is not None:
plt.ylim([min_value, max_value])
plt.yticks([])
plt.legend()
plt.title(milestone)
plt.tight_layout()
if kwargs['output']:
plt.savefig(kwargs['output'])
else:
plt.show()
def import_modules() -> None:
"""Import slow-loading modules."""
global matplotlib
import matplotlib
# matplotlib.use('pgf')
global plt
import matplotlib.pyplot as plt
global sns
import seaborn as sns
def plot(simulation: Simulation, **kwargs) -> None:
"""Plot simulation results.
Parameters
----------
simulation : Simulation
Object containing all the relevant info about the simulation.
"""
import_modules()
# cv = kwargs['colvars']
# colvars_spec = [int(c) for c in cv.strip().split(',')]
#
# print(colvars_spec)
#
# collective_variables = simulation.collective_variables
#
# cvs = []
# for i, cv in enumerate(collective_variables.collective_variables):
# cvs.append(cv)
if kwargs['histograms']:
plot_histograms(simulation, **kwargs)
else:
plot_milestones(simulation, **kwargs)
| mit |
ldirer/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
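# Note added for clarity: each column of Y is one task; the same five relevant
# features are active in every task, but their amplitudes vary smoothly with
# the task index.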
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
misterwindupbird/IBO | demo.py | 1 | 7518 | # Copyright (C) 2010, 2011 by Eric Brochu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#!/usr/bin/env python
# encoding: utf-8
"""
demo.py
Just a little script to demonstrate how to call the interactive Bayesian optimization (IBO/EGO) code.
Created by Eric on 2010-03-20.
"""
from copy import deepcopy
from numpy import array, arange
from matplotlib.pylab import *
from ego.gaussianprocess import GaussianProcess, PrefGaussianProcess
from ego.gaussianprocess.kernel import GaussianKernel_ard
from ego.acquisition import EI, UCB, maximizeEI, maximizeUCB
from ego.acquisition.prefutil import query2prefs
from ego.acquisition.gallery import fastUCBGallery
from ego.utils.testfunctions import Hartman6
def demoObservations():
"""
Simple demo for a scenario where we have direct observations (ie ratings
or responses) with noise. The model has three parameters, but after
initial training, we fix one to be 1.0 and optimize the other two. At
each step, we visualize the posterior mean, variance and expected
improvement. We then find the point of maximum expected improvement and
ask the user for the scalar response value.
To see how the model adapts to inputs, try rating the first few values
higher or lower than predicted and see what happens to the visualizations.
"""
    # the kernel (ARD) parameters control how strongly each input dimension
    # influences the model; here we are defining a model with three parameters
kernel = GaussianKernel_ard(array([.5, .5, .3]))
# we want to allow some noise in the observations -- the noise parameter
# is the variance of the additive Gaussian noise Y + N(0, noise)
noise = 0.1
# create the Gaussian Process using the kernel we've just defined
GP = GaussianProcess(kernel, noise=noise)
# add some data to the model. the data must have the same dimensionality
# as the kernel
X = [array([1, 1.5, 0.9]),
array([.8, -.2, -0.1]),
array([2, .8, -.2]),
array([0, 0, .5])]
Y = [1, .7, .6, -.1]
print 'adding data to model'
for x, y in zip(X, Y):
print '\tx = %s, y = %.1f' % (x, y)
GP.addData(X, Y)
# the GP.posterior(x) function returns, for x, the posterior distribution
# at x, characterized as a normal distribution with mean mu, variance
# sigma^2
testX = [array([1, 1.45, 1.0]),
array([-10, .5, -10])]
for tx in testX:
mu, sig2 = GP.posterior(tx)
print 'the posterior of %s is a normal distribution N(%.3f, %.3f)' % (tx, mu, sig2)
# now, let's find the best points to evaluate next. we fix the first
# dimension to be 1 and for the others, we search the range [-2, 2]
bound = [[1, 1], [-1.99, 1.98], [-1.99, 1.98]]
figure(1, figsize=(5, 10))
while True:
_, optx = maximizeEI(GP, bound, xi=.1)
# visualize the mean, variance and expected improvement functions on
# the free parameters
x1 = arange(bound[1][0], bound[1][1], 0.1)
x2 = arange(bound[2][0], bound[2][1], 0.1)
X1, X2 = meshgrid(x1, x2)
ei = zeros_like(X1)
m = zeros_like(X1)
v = zeros_like(X1)
for i in xrange(X1.shape[0]):
for j in xrange(X1.shape[1]):
z = array([1.0, X1[i,j], X2[i,j]])
ei[i,j] = -EI(GP).negf(z)
m[i,j], v[i,j] = GP.posterior(z)
clf()
for i, (func, title) in enumerate(([m, 'prediction (posterior mean)'], [v, 'uncertainty (posterior variance)'], [ei, 'utility (expected improvement)'])):
ax = subplot(3, 1, i+1)
cs = ax.contourf(X1, X2, func, 20)
ax.plot(optx[1], optx[2], 'wo')
colorbar(cs)
ax.set_title(title)
ax.set_xlabel('x[1]')
ax.set_ylabel('x[2]')
ax.set_xticks([-2,0,2])
ax.set_yticks([-2,0,2])
show()
m, v = GP.posterior(optx)
try:
response = input('\nmaximum expected improvement is at parameters x = [%.3f, %.3f, %.3f], where mean is %.3f, variance is %.3f. \nwhat is the value there (non-numeric to quit)? ' % (optx[0], optx[1], optx[2], m, v))
except:
break
GP.addData(optx, response)
print 'updating model.'
def demoPrefGallery():
"""
A simpler demo, showing how to use a preference gallery. This demo
is not interactive -- it uses the 6D Hartman test function to generate
the preferences.
"""
N = 3 # gallery size
# use the Hartman6 test function, which has a kernel and bounds predefined
tf = Hartman6()
bounds = tf.bounds
kernel = tf.createKernel(GaussianKernel_ard)
# set up a Preference Gaussian Process, in which the observations are
# preferences, rather than scalars
GP = PrefGaussianProcess(kernel)
# initial preferences -- since we have no informative prior on the space,
# the gallery will be a set of points that maximize variance
gallery = fastUCBGallery(GP, bounds, N)
# this is a utility function for automtically testing the preferences --
# given a test functions and some sample points, it will return a list of
# preferences
prefs = query2prefs(gallery, tf.f)
# preferences have the form [r, c, degree], where r is preferred to c.
    # degree is the degree of preference. Just leave degree at 0 for now.
for r, c, _ in prefs:
print '%s preferred to %s' % (r, c)
# add preferences to the model
GP.addPreferences(prefs)
# get another gallery, but with the first three dimensions fixed to .5
nbounds = deepcopy(bounds)
nbounds[:3] = [[.5,.5]]*3
gallery = fastUCBGallery(GP, nbounds, N)
prefs = query2prefs(gallery, tf.f)
for r, c, _ in prefs:
print '%s preferred to %s' % (r, c)
# get another gallery, but with the *last* three dimensions fixed to .5
nbounds = deepcopy(bounds)
nbounds[3:] = [[.5,.5]]*3
gallery = fastUCBGallery(GP, nbounds, N)
prefs = query2prefs(gallery, tf.f)
for r, c, _ in prefs:
print '%s preferred to %s' % (r, c)
# preferences don't have to come from the gallery
r = array([0, 0, .5, 0, 1, .25])
c = array([1, 1, .75, 1, 0, .5])
pref = (r, c, 0)
GP.addPreferences([pref])
if __name__ == '__main__':
demoObservations()
# demoPrefGallery()
| mit |
andaag/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
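   A minimal sketch of that API (this example is not part of the original
   docstring; the file name below is only an illustration)::

      >>> from sklearn.externals.joblib import dump, load  # doctest: +SKIP
      >>> dump(a, '/tmp/joblib_demo.pkl')                  # doctest: +SKIP
      >>> a2 = load('/tmp/joblib_demo.pkl')                # doctest: +SKIP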
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
tonyao-/iozone-results-comparator | old_version/iozone_results_comparator.py | 7 | 56394 | #!/usr/bin/python
# iozone_results_comparator.py - parse iozone output files and write stats and plots to output html
# Copyright (C) 2011
# Adam Okuliar aokuliar at redhat dot com
# Jiri Hladky hladky dot jiri at gmail dot com
# Petr Benas petrbenas at gmail dot com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('/exports/perf/python')
import os
import argparse
import re
import numpy
from scipy import stats
import warnings
import shutil
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import fontManager, FontProperties
out_dir='Compare-html' # normal mode output directory name
out_file='index.html' # main html file name
tabdDir = 'Tab_delimited' # tab delimited output directory
# write html header and page beginning
# htmldoc - where to write
# baseFiles - input files used to get baseline data
# set1Files - input files used to get set1 data
def write_header(htmldoc, baseFiles, set1Files, title='Iozone', header='Iozone results'):
html_header='''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>'''+title+'''</title>
</head>
<body>
<link rel="stylesheet" type="text/css" href="stylesheet.css">
<div class="main">
<div class="inner">
<h1 id="top">
'''+header+'''
</h1>
'''
htmldoc.write(html_header)
htmldoc.write('<DL class="filelist">')
htmldoc.write('<DT><STRONG>Baseline data set</STRONG><UL>')
for file_name in baseFiles:
htmldoc.write('<LI>'+file_name)
htmldoc.write('</UL>')
htmldoc.write('<DT><STRONG>Investigated data set</STRONG><UL>')
for file_name in set1Files:
htmldoc.write('<LI>'+file_name)
htmldoc.write('</UL>')
htmldoc.write('</DL>')
htmldoc.write('<p>mean => Arithmetic mean<br>')
    htmldoc.write('standard dev. => Sample standard deviation<br>')
    htmldoc.write('ci. max 90%, ci.min => confidence interval at confidence level 90% => it means that the mean value of the distribution lies with 90% probability in the interval ci_min-ci_max<br>')
htmldoc.write('geom. mean => Geometric mean<br>')
htmldoc.write('median => Second quartile = cuts data set in half = 50th percentile <br>')
htmldoc.write('first quartile => cuts off lowest 25% of data = 25th percentile <br>')
htmldoc.write('third quartile => cuts off highest 25% of data, or lowest 75% = 75th percentile <br>')
htmldoc.write('minimum => Lowest value of data set <br>')
    htmldoc.write('maximum => Highest value of data set <br>')
    htmldoc.write('baseline set1 difference => Difference of medians of both sets in percent. Arithmetic means are used in detail mode instead.<br>')
htmldoc.write('ttest p-value => Student\'s t-test p-value = probability the both data sets are equal <br>')
htmldoc.write('ttest equality => If p-value is higher than 0.1, data sets are considered being equal with 90% probability. Otherwise the data sets are considered being different.<br>')
    htmldoc.write('Linear regression of all results: the regression line is of the form y = ax, the b coefficient is zero. </p>')
htmldoc.write('<p>for details about operations performed see <a href="http://www.iozone.org/docs/IOzone_msword_98.pdf">Iozone documentation</a>')
htmldoc.write('</p>')
return;
# end the page and close htmldoc
# htmldoc - where to write
def write_footer(htmldoc):
html_footer='''
</div>
</div>
</body>
</html>
'''
htmldoc.write(html_footer)
return
# create the plot image file
# graphlabel - label on the top of plot
# xlabel - label of x axis
# ylabel - label of y axis
# data - data in write_operation format
# name - figure name
# semilogx - whether to use a logarithmic scale on the X axis
# semilogy - whether to use a logarithmic scale on the Y axis
# type - to differ normal and detail mode plots
def make_plot(graphlabel,xlabel,ylabel,data,name,semilogx=False,semilogy=False, type='normal'):
    # check that data vector sizes match, i.e. whether the data can be plotted
ok=True
l=len(data[0][2])+1 #JH
for data_item in data[1:]:
for run in data_item[2]:
if len(run)!=l:
ok=False
if not ok:
print 'figure '+name+' has different vector sizes, skipping plot'
sys.stdout.flush()
print l,
for data_item in data[1:]:
for run in data_item[2]:
print len(run),
print ''
return
# different values are being plotted in normal and detail mode
datalines = []
errorbars_maxes = []
errorbars_mins = []
if (type == 'normal'):
datalines.append(data[1][9])
datalines.append(data[2][9])
errorbars_mins.append(numpy.array(data[1][9]) - numpy.array(data[1][10]))
errorbars_mins.append(numpy.array(data[2][9]) - numpy.array(data[2][10]))
errorbars_maxes.append(numpy.array(data[1][11]) - numpy.array(data[1][9]))
errorbars_maxes.append(numpy.array(data[2][11]) - numpy.array(data[2][9]))
textstr = 'Plotted values are\n - first quartile\n - median\n - third quartile\nfor each datapoint.'
elif (type == 'detail'):
datalines.append(data[1][3])
datalines.append(data[2][3])
errorbars_mins.append(numpy.array(data[1][3]) - numpy.array(data[1][5]))
errorbars_mins.append(numpy.array(data[2][3]) - numpy.array(data[2][5]))
errorbars_maxes.append(numpy.array(data[1][6]) - numpy.array(data[1][3]))
errorbars_maxes.append(numpy.array(data[2][6]) - numpy.array(data[2][3]))
textstr = 'Plotted values are\n - ci. min. 90%\n - mean val.\n - ci. max 90%\nfor each datapoint.'
else:
print 'unknown make_plot function type parameter'
return
# get rid of starting and/or ending zero values - useful for detail mode
minIndex = 0
maxIndex = len(data[0][2])
while (datalines[0][minIndex] == datalines[1][minIndex] == 0):
minIndex += 1
while (datalines[0][maxIndex - 1] == datalines[1][maxIndex - 1] == 0):
maxIndex -= 1
plt.clf()
# create plot lines for both sets
for i in range(len(datalines)):
p=plt.plot(data[0][2][minIndex:maxIndex],datalines[i][minIndex:maxIndex],'o-',color=data[i+1][1],label=data[i+1][0])
plt.errorbar(data[0][2][minIndex:maxIndex],datalines[i][minIndex:maxIndex], yerr=[errorbars_mins[i][minIndex:maxIndex], errorbars_maxes[i][minIndex:maxIndex]] ,color=data[i+1][1], fmt='o-',)
plt.grid(True)
if semilogx:
plt.semilogx()
if semilogy:
plt.semilogy()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(graphlabel)
# add legend details
font = FontProperties(size='small');
a = plt.legend(loc=0, prop=font);
txt = matplotlib.offsetbox.TextArea(textstr, textprops=dict(size=7))
box = a._legend_box
box.get_children().append(txt)
box.set_figure(box.figure)
# Fedora 14 bug 562421 workaround
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning)
plt.savefig(out_dir+'/'+name)
plt.clf()
return
# class to parse iozone results from input data files
class Parse_iozone:
    # init, check whether input files are readable
def __init__(self,iozone_file_list):
self.names=["iwrite", "rewrite", "iread", "reread", "randrd", "randwr", "bkwdrd", "recrewr",
"striderd", "fwrite", "frewrite", "fread", "freread"]
self.names_dictionary = {}
count = 0
for item in self.names:
self.names_dictionary[item]=count
            count += 1
self.files=[]
self.operations = [] #List of operations
self.columns = {}
assert (iozone_file_list is not None)
for file_name in iozone_file_list:
if os.access(file_name, os.R_OK):
self.files.append(file_name)
else:
sys.stderr.write('File "%s" is not readable.\n' % (file_name))
self.read_all_files()
self.get_all_operations()
# split line to get data for operations
def split_iozone_line(self,line):
field_list = [16, 8, 8, 8, 9, 9, 8, 8, 8, 9, 9, 9, 9, 8, 9]
offset = 0
output = []
line=line.rstrip('\n')
for i in range(len(field_list)):
width = field_list[i]
substring=line[offset:width+offset]
offset += width
if len(substring) == width:
matchObj = re.match( r'^\s+$', substring, re.M)
if matchObj:
output.append(None)
else:
output.append(int(substring))
else:
output.append(None)
if i != len(field_list) -1 or ( width - len(substring) ) > 3 :
sys.stderr.write('%s "%s"\n' % ("Line:", line ) )
sys.stderr.write('\t%s "%s"\n' % ("Substring:", substring ) )
sys.stderr.write('\t%s %d, %s %d\n' % ("Length:", len(substring), "Expecting:", width ) )
return output
# read data from input files
def read_all_files(self):
file_counter = 0
for file_name in self.files:
this_file_columns = {}
            file_counter += 1
f = open(file_name, "r")
for line in f:
matchObj = re.match( r'^\s+\d+\s+\d+\s+\d+', line, re.M)
if matchObj:
#Data lines
line_in_array = self.split_iozone_line(line);
#sys.stderr.write('%s\t%s\n' % (line_in_array[0],line_in_array[-1]) )
file_size = line_in_array.pop(0);
block_size = line_in_array.pop(0);
for j in range( 0, len(self.names), 1 ):
column_name = self.names[j]
full_column_name = 'FS_' + str(file_size) + '_BLOCK_' + str(block_size) + '_' + column_name
key=(file_size,block_size,column_name)
if ( j>len(line_in_array) ) or ( line_in_array[j] is None ):
#Check if key exists already
if ( file_counter > 1 and self.columns.has_key(key) ):
sys.stderr.write('%s: file number %d: value %s exists in previous files but not in this one!\n'
%(file_name, file_counter, full_column_name) )
self.columns[key].append(None)
this_file_columns[key]=None
else:
# We have non-empty value
if ( file_counter > 1 and not (self.columns.has_key(key) ) ):
sys.stderr.write('%s: file number %d: value %s is not defined in previous files!\n'
%(file_name, file_counter, full_column_name) )
self.columns[key]=[]
for temp_file_counter in range (1,file_counter-1,1):
self.columns[key].append(None)
#Now add values to the array
if not (self.columns.has_key(key) ):
self.columns[key]=[]
self.columns[key].append(line_in_array[j]/1024.0)
this_file_columns[key]=None
#File parsing is complete.
for key in self.columns.iterkeys():
if ( not ( this_file_columns.has_key(key) ) ):
sys.stderr.write('%s: file number %d: value %s exists in previous files but not in this one!\n'
%(file_name, file_counter, full_column_name) )
self.columns[key].append(None)
this_file_columns[key]=None
return
    # check which operations were present in the input iozone files
def get_all_operations(self):
all_names = {}
for key in self.columns.iterkeys():
(FS,BS,NAME) = key
all_names[NAME] = self.names_dictionary[NAME]
for item in self.names:
if item in all_names.keys():
self.operations.append(item)
return
# return part of write_operation format data for operation , FS oriented
def get_FS_list_for_any_BS(self,operation):
all_BS = {}
all_FS = {}
x = []
y = []
for key in self.columns.iterkeys():
(FS,BS,NAME) = key
if ( NAME == operation):
if not all_BS.has_key(BS):
all_BS[BS]=[]
all_BS[BS].append(FS)
if not all_FS.has_key(FS):
all_FS[FS]=[]
all_FS[FS].append(BS)
for FS in sorted(all_FS.keys()):
#List of all file sizes
x.append(FS)
for BS in sorted(all_BS.keys()):
for file_number in range (len(self.files)):
row = []
row.append(BS)
for FS in sorted(all_FS.keys()):
#We are creating a row of table
#Columns - different file sizes (FS)
# format is array ['label',Y_for_FS1, Y_for_FS2]
#We need to check if Y_for_FS1 exists
if self.columns.has_key((FS,BS,operation)):
assert(len( self.columns[(FS,BS,operation)] ) == len(self.files))
row.append(self.columns[FS,BS,operation][file_number])
else:
row.append(0)
y.append(row)
return (x,y)
# return part of write_operation format data for operation , BS oriented
def get_BS_list_for_any_FS(self,operation):
all_BS = {}
all_FS = {}
x = []
y = []
for key in self.columns.iterkeys():
(FS,BS,NAME) = key
if ( NAME == operation):
if not all_BS.has_key(BS):
all_BS[BS]=[]
all_BS[BS].append(FS)
if not all_FS.has_key(FS):
all_FS[FS]=[]
all_FS[FS].append(BS)
for BS in sorted(all_BS.keys()):
#List of all block sizes
x.append(BS)
for FS in sorted(all_FS.keys()):
for file_number in range (len(self.files)):
row = []
row.append(FS)
for BS in sorted(all_BS.keys()):
#We are creating a row of table
#Columns - different block sizes (BS)
# format is array ['label',Y_for_BS1, Y_for_BS2]
#We need to check if Y_for_BS1 exists
if self.columns.has_key((FS,BS,operation)):
assert(len( self.columns[(FS,BS,operation)] ) == len(self.files))
row.append(self.columns[FS,BS,operation][file_number])
else:
row.append(0)
y.append(row)
return (x,y)
# return all set data for operation given
def get_all_for_operation(self,operation):
result = []
for key in self.columns.iterkeys():
(FS,BS,NAME) = key
if ( NAME == operation):
for val in self.columns[key]:
result.append(val)
return result
# get all set data for all operations
def get_all_data_list(self):
result = []
for key in self.columns.iterkeys():
for val in self.columns[key]:
result.append(val)
return result
# checks whether the file is readable
# file_name - file to check
def is_file_readable(file_name):
if os.access(file_name, os.R_OK):
return file_name
else:
msg='File "%s" is not readable.\n' % (file_name)
raise argparse.ArgumentTypeError(msg)
# count mean, standard deviation and confidence interval of data given in list
# data = input data onedimensional list
# confidence = confidence interval probability rate in percent
# m = mean
# sd = standard deviation
# h = error
# m-h = confidence interval min
# m+h = confidence interval max
# gm = geometric mean
# med = median
# frstQrt = first quartile
# thrdQrt = third quartile
# minVal = sample minimum
# maxVal = sample maximum
def mean_confidence_interval(data, confidence=0.90):
actualInput = remove_zeros_from_list(data)
# if input were all zeros
if not actualInput:
return (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
n = len(actualInput)
m = numpy.mean(actualInput) # arithmetic mean
gm = stats.gmean(actualInput) # geometric mean
med = numpy.median(actualInput) # median
frstQrt = stats.scoreatpercentile(actualInput, 25) # first quartile
thrdQrt = stats.scoreatpercentile(actualInput, 75) # third quartile
minVal = sorted(actualInput)[0]
maxVal = sorted(actualInput)[-1]
sd = numpy.std(actualInput, ddof=1) # delta degree of freedom = 1 - sample standard deviation (Bessel's correction)
se = sd/numpy.sqrt(n) # standard error
h = se * stats.t._ppf((1+confidence)/2., n-1) # confidence interval
return (m, sd, h, m-h, m+h, gm, med, frstQrt, thrdQrt, minVal, maxVal)
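# A small worked example (hypothetical numbers, not part of the original script):
# for the sample [10, 12, 0, 14] the zero is dropped first, so the statistics are
# computed over [10, 12, 14] -- mean 12.0, sample std. dev. 2.0, median 12.0, and a
# 90% confidence interval of roughly 12.0 +/- 3.37 around the mean.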
# compute all statistical values here
# data is twodimensional array
# order - order of column data values
# returns vectors of statistical values describing input data columns
def compute_all_stats(data, order=None):
# if no order given, just walk through data
if (order == None):
order = range(len(data))
devs=[]
avgs=[]
ci_mins=[]
ci_maxes=[]
errs=[]
gms=[]
meds=[]
frstQrts=[]
thrdQrts=[]
minVals=[]
maxVals=[]
for colNr in order:
(avg, dev, err, ci_min, ci_max, gm, med, frstQrt, thrdQrt, minVal, maxVal) = mean_confidence_interval(data[colNr])
avgs.append(avg)
devs.append(dev)
errs.append(err)
ci_mins.append(ci_min)
ci_maxes.append(ci_max)
gms.append(gm)
meds.append(med)
frstQrts.append(frstQrt)
thrdQrts.append(thrdQrt)
minVals.append(minVal)
maxVals.append(maxVal)
return (avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
# count ttest p-value from two sets of tada
# data[[baseline col1, baseline col2, ...], [set1 col1, set1 col2, ]]
# return [col1 result, col2 result]
def ttest_equality(data):
res = []
for i in range(len(data[0])):
        # scipy's ttest uses mean from scipy, which is deprecated. this is a temporary workaround
# until it's fixed to use mean from numpy
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
(tstat, pval) = stats.ttest_ind(remove_zeros_from_list(data[0][i]), remove_zeros_from_list(data[1][i]))
res.append(pval)
return res
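# Example (hypothetical numbers): ttest_equality([[[10, 11, 12]], [[10.5, 11.5, 12.5]]])
# returns a single p-value of roughly 0.57; since that is above the 0.1 threshold,
# the two columns would be reported as equal.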
# self-explanatory
# list input, outputs list with zero values erased
def remove_zeros_from_list(dataIn):
# get rid of zeros. Zero indicates value missing, so there is no
# intention to take it as a valid value.
actualInput = []
for i in dataIn:
if (i != float(0)):
actualInput.append(i)
return actualInput
# count statistics in one dimension for data given
# input data format:
#data[0][0]=label for x values
#data[0][1]=label for values to be agregated in second col
#data[0][2]=[column names]
#data[1][0]=label for set
#data[1][1]=color for set
#data[1][2]=[[row1_name, run1_value_for_col1,run1_value_for_col2], [row2_name, run1_value_for_col1,run1_value_for_col2],]
# adds folloiwing to input data:
# data[i][3] = arithmetic means
# data[i][4] = deviations
# data[i][5] = ci_mins
# data[i][6] = ci_maxes
# data[i][7] = errors
# data[i][8] = geometric means
# data[i][9] = medians
# data[i][10] = first quartiles
# data[i][11] = third quartiles
# data[i][12] = minimal Values
# data[i][13] = maximal Values
# data[-1] = ttest results
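# Example of the expected layout (hypothetical numbers, two runs per block size):
#   data = [['File size [kB]', 'Block size [kB]', [64, 128]],
#           ['baseline', 'black', [[4, 10.0, 11.0], [4, 10.5, 11.5]]],
#           ['set1', 'red', [[4, 9.0, 10.0], [4, 9.5, 10.5]]]]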
def stats_one_dim(data):
# basic check for expected data format
assert(len(data)==3)
for i in range(1,len(data)):
nrOfCols=len(data[1][2][0])-1
cols=[] # list of lists of colunn values.
for f in range(nrOfCols):
cols.append([])
# get data from input format to cols data structure
for v in range(len(data[i][2])):
for j in range(nrOfCols):
cols[j].append(float(data[i][2][v][j+1])) # fill column list
# compute statistics
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(cols, range(nrOfCols))
# append results to input
data[i].append(avgs)
data[i].append(devs)
data[i].append(ci_mins)
data[i].append(ci_maxes)
data[i].append(errs)
data[i].append(gms)
data[i].append(meds)
data[i].append(frstQrts)
data[i].append(thrdQrts)
data[i].append(minVals)
data[i].append(maxVals)
if (i == 1): # computing baseline
baselineCols = cols
# compute ttest
ttestInput=[baselineCols]
ttestInput.append(cols)
data.append(ttest_equality(ttestInput))
return
# remove columns full of zeros in both sets
# data - data - [setNr][row]
# data are changed in situ, return index of firs non-zero column(from original data)
def remove_all_zeros_columns(data):
# detect all-zeros columns
oldNrOfCols=len(data[0][0])
zeros = []
for i in range(oldNrOfCols):
zeros.append(True)
    # if there is a single non-zero value, the column is not empty
for colNr in range(oldNrOfCols): # for every column
for set in data: # for both sets
for row in set: # for every line
if (row[colNr] != 0):
zeros[colNr] = False
# remove all-zeros columns - we count averages, otherwise we would divide by a zero
for colNr in reversed(range(oldNrOfCols)): # for every column
for setNr in range(len(data)): # for both sets
for rowNr in range(len(data[setNr])): # for every line
if (zeros[colNr]):
del data[setNr][rowNr][colNr]
# return index of first valid column
for i in range(len(zeros)):
if not (zeros[i]):
return i
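# Example (hypothetical): for data = [[[0, 1], [0, 2]], [[0, 3], [0, 4]]] the first
# column is zero in every row of both sets, so it is deleted in place (the rows become
# [1], [2], [3], [4]) and the function returns 1, the index of the first non-zero
# column in the original data.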
# detail mode for closer view to one operation results
# it basically does what __main__ does in normal mode
# base - Parse_iozone object with baseline data
# set1 - Parse_iozone object with set1 data
# operation - operation to write
def detail(base, set1, operation):
global out_dir
# all operations - recursively call detail function for all operations
if (operation == 'ALL'):
for op in set1.names:
detail(base, set1, op)
sys.exit(0)
if not (operation in set1.names):
print 'Unknown operation ' + operation
print 'Valid operations are: ' + ', '.join(set1.names) + ', ALL'
sys.exit(1)
# Fixed Block Size
print 'writing detailed info about ' + operation + ' for fixed block size'
out_dir='Compare_' + operation + '_for_fixed_Block_Size'
try:
shutil.rmtree('./'+out_dir)
except:
pass
os.makedirs(out_dir)
shutil.copyfile('./stylesheet.css',out_dir+'/stylesheet.css')
htmldoc=open(out_dir+'/'+out_file,'w')
# prepare data to write_operation data format
write_header(htmldoc, base.files, set1.files, operation + ' for fixed BS', 'Iozone results for ' + operation +', data are arranged by block size');
data = []
data.append(['File size [kB]','Block size [kB]'])
data.append(['baseline','black'])
(x1,y1)=base.get_FS_list_for_any_BS(operation)
data[0].append(x1)
data[1].append(y1)
data.append(['set1','red'])
(x2,y2)=set1.get_FS_list_for_any_BS(operation)
assert(x1==x2)
data[2].append(y2)
# call write function
write_detail_html(htmldoc,operation,data)
write_footer(htmldoc)
htmldoc.close()
# Fixed file size
print 'writing detailed info about ' + operation + ' for fixed file size'
out_dir='Compare_' + operation + '_for_fixed_File_Size'
try:
shutil.rmtree('./'+out_dir)
except:
pass
os.makedirs(out_dir)
shutil.copyfile('./stylesheet.css',out_dir+'/stylesheet.css')
htmldoc=open(out_dir+'/'+out_file,'w')
# prepare data to write_operation data format
write_header(htmldoc, base.files, set1.files, operation + ' for fixed FS', 'Iozone results for ' + operation + ', data are arranged by file size');
data = []
data.append(['Block size [kB]', 'File size [kB]'])
data.append(['baseline','black'])
(x1,y1)=base.get_BS_list_for_any_FS(operation)
data[0].append(x1)
data[1].append(y1)
data.append(['set1','red'])
(x2,y2)=set1.get_BS_list_for_any_FS(operation)
assert(x1==x2)
data[2].append(y2)
# call write function
write_detail_html(htmldoc,operation,data)
write_footer(htmldoc)
htmldoc.close()
print 'Finished.\nTo view detailed info about ' + operation + ' see following pages in your web browser'
print 'file://' + os.getcwd() + '/Compare_' + operation + '_for_fixed_Block_Size/index.html'
print 'file://' + os.getcwd() + '/Compare_' + operation + '_for_fixed_File_Size/index.html'
# write tables and plots in detail mode
# basically what the write_operation function does in normal mode, except that the raw data is printed in addition to the stats
# htmldoc - where to write
# operation - name of operation being currently written
# data - data in write_operation function format
def write_detail_html(htmldoc,operation,data):
# aggregate values
agrVals = [{},{}] # {'block size': [[file1_col1, file1_col2, ..], [file2_col1, file2_col2,...] ...]}
# for every set
for i in range(len(data)-1):
# for every block/file size
for v in range(len(data[i+1][2])):
key = data[i+1][2][v][0]
val = data[i+1][2][v][1:] # cutting row name
if not key in agrVals[i].keys():
agrVals[i][key] = []
agrVals[i][key].append(val)
# for every key
for key in sorted(agrVals[0].keys()):
        # remove all-zeros columns. startColNr = first valid column number, needed for a proper legend
startColNr = remove_all_zeros_columns([ agrVals[0][key], agrVals[1][key] ])
nrOfCols=len(agrVals[0][key][0])
htmldoc.write('<a name="'+str(key)+'"></a> \n')
htmldoc.write('<img src=\"'+str(key)+'.png\" alt=\"'+str(key)+'\" class="plot" />\n')
htmldoc.write('<table>\n')
htmldoc.write('<tr class=\"bottomline\">')
htmldoc.write('<td rowspan=\"2\"/>\n')
htmldoc.write('<td rowspan=\"2\">'+data[0][1]+'</td>\n')
htmldoc.write('<td colspan=\"'+str(nrOfCols)+'\">' + data[0][0] + '</td>\n')
htmldoc.write('</tr>\n')
htmldoc.write('<tr>')
for m in range(startColNr, startColNr + nrOfCols):
htmldoc.write('<td>'+str(int(data[0][2][m]))+'</td>\n')
htmldoc.write('</tr>\n')
# for both sets
for i in range(len(data)-1):
for rowNr in range(len(agrVals[i][key])):
if (rowNr == 0):
htmldoc.write('<tr class=\"topline\">')
htmldoc.write('<td rowspan=\"'+str(len(agrVals[i][key])+10)+'\">'+data[i+1][0]+'</td>')
htmldoc.write('<td>'+str(int(key))+'</td>')
else:
htmldoc.write('<tr>')
htmldoc.write('<td>'+str(int(key))+'</td>')
for val in agrVals[i][key][rowNr]:
htmldoc.write('<td>'+str(round(val,2))+'</td>')
htmldoc.write('</tr>\n')
# prepare data for stats
columnValues = [] # [[col1_row1, col1_row2, ...], [col2_row1, col2_row2, ...], ...]
for m in data[0][2]:
columnValues.append([])
for row in agrVals[i][key]:
for colNr in range(len(row)):
columnValues[colNr].append(row[colNr])
# compute statistics
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(columnValues)
if (i == 0):
avgs_baseline = avgs
columnValues_baseline = columnValues
# statistic vals
statVals = (avgs, devs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
            # value name for each data index
valNames = ('mean val.', 'standard dev.', 'ci. min. 90%', 'ci. max 90%', 'geom. mean', 'median',
'first quartile', 'third quartile', 'minimum', 'maximum')
# write all statistic values
for v in range(len(statVals)):
if (v == 0):
htmldoc.write('<tr class=\"topline\">\n')
else:
htmldoc.write('<tr>\n')
htmldoc.write('<td>' + valNames[v] + '</td>\n')
for m in range(nrOfCols):
htmldoc.write('<td>'+str(round(statVals[v][m],2))+'</td>\n')
htmldoc.write('</tr>\n')
# append results to input
data[i+1].append(avgs)
data[i+1].append(devs)
data[i+1].append(ci_mins)
data[i+1].append(ci_maxes)
data[i+1].append(errs)
# count differences a ttest
differences = []
for m in range(nrOfCols):
differences.append((avgs[m] /avgs_baseline[m] - 1) * 100)
ttestInput=[[],[]]
for i in range(nrOfCols):
ttestInput[0].append(columnValues_baseline[i])
ttestInput[1].append(columnValues[i])
ttestResults = ttest_equality(ttestInput)
# and let it be written
write_diff_ttest(htmldoc, differences, ttestResults)
# make plot
make_plot(data[0][1]+' = '+str(key),data[0][0],'Operation speed [MB/s]',data,str(key),semilogx=True,semilogy=False, type='detail')
# delete what was appended above to use make_plot function
del data[1][3:]
del data[2][3:]
return
# write differences and ttest results and close table
# htmldoc - where to write
# differences - list of set1 baseline differences
# tterst Results - list of t-test p-values
def write_diff_ttest(htmldoc, differences, ttestResults):
# write differences
nrOfCols = len(differences)
htmldoc.write('<tr class=\"bottomline topline\">\n')
htmldoc.write('<td colspan="2">baseline set1 difference</td>\n')
for m in range(nrOfCols):
htmldoc.write('<td>'+str(round(differences[m],2))+' % </td>\n')
htmldoc.write('</tr>\n')
# write p-values
htmldoc.write('<tr class=\"bottomline\">\n')
htmldoc.write('<td colspan="2">ttest p-value</td>\n')
for m in range(nrOfCols):
htmldoc.write('<td>'+str(round(ttestResults[m],4))+'</td>\n')
htmldoc.write('</tr>\n')
# tuple ternary operator workaround
ternary_op = ('DIFF', 'SAME')
htmldoc.write('<tr class=\"bottomline\">\n')
htmldoc.write('<td colspan="2">ttest equality</td>\n')
# write ttest result according p-val
for m in range(nrOfCols):
if (math.isnan(ttestResults[m])):
res = 'nan'
else:
res = ternary_op[(ttestResults[m] > 0.1)]
htmldoc.write('<td>'+res+'</td>\n') # 90% probability
htmldoc.write('</tr>\n')
htmldoc.write('</table>\n')
return
# write table of operation stats
# htmldoc - where to write
# operation - operation name
# data - input data in following format:
#data[0][0]=label for x values
#data[0][1]=label for values to be agregated in second col
#data[0][2]=[column names]
#data[1][0]=label for set
#data[1][1]=color for set
#data[1][2]=[[row1_name, run1_value_for_col1,run1_value_for_col2], [row2_name, run1_value_for_col1,run1_value_for_col2],]
# data[i][3] = arithmetic means
# data[i][4] = deviations
# data[i][5] = ci_mins
# data[i][6] = ci_maxes
# data[i][7] = errors
# data[i][8] = geometric means
# data[i][9] = medians
# data[i][10] = first quartiles
# data[i][11] = third quartiles
# data[i][12] = minimal Values
# data[i][13] = maximal Values
# data[-1] = ttest results
# filename - how to name plot image file
def write_operation(htmldoc,operation,data,filename):
htmldoc.write('<img src=\"'+filename+'.png\" alt=\"'+filename+'\" class="plot"/>\n')
htmldoc.write('<table>\n')
# table header
htmldoc.write('<tr>')
htmldoc.write('<th class=\"bottomline\">'+operation_name(operation)+'</th>\n')
htmldoc.write('<th class=\"bottomline\">'+data[0][0]+'</th>\n')
for m in data[0][2]:
htmldoc.write('<th>'+str(int(m))+'</th>\n')
htmldoc.write('</tr>\n')
nrOfCols=len(data[1][2][0])-1
# compute statistics
stats_one_dim(data)
# for both data sets
for i in range(1,(len(data)-1)):
        # aggregate rows with the same row main value (BS/FS)
agrVals={} # {'block size': [column number][val_file1, val_file2, ..], ...}
for v in range(len(data[i][2])):
for j in range(nrOfCols):
                # aggregate by row name
key = data[i][2][v][0]
val = data[i][2][v][j+1]
if not key in agrVals.keys():
agrVals[key] = []
for column in range(nrOfCols):
agrVals[key].append([])
agrVals[key][j].append(val)
# recognize set name
htmldoc.write('<tr class=\"topline\">\n')
if (i == 1):
htmldoc.write('<td rowspan="10">baseline</td><td>mean val.</td>\n')
else:
htmldoc.write('<td rowspan="10">set1</td><td>mean val.</td>\n')
for m in range(nrOfCols):
htmldoc.write('<td class=\"topline\">'+str(round((data[i][3][m]),2))+'</td>\n')
htmldoc.write('</tr>\n')
        # value name for each data index
valNames = {4:'standard dev.', 5:'ci. min. 90%', 6:'ci. max 90%', 8:'geom. mean', 9:'median',
10:'first quartile', 11:'third quartile', 12:'minimum', 13:'maximum'}
# write all statistic values
for v in sorted(valNames.keys()):
htmldoc.write('<tr>\n')
htmldoc.write('<td>' + valNames[v] + '</td>\n')
for m in range(nrOfCols):
htmldoc.write('<td>'+str(round(data[i][v][m],2))+'</td>\n')
htmldoc.write('</tr>\n')
# count differences
differences = []
for m in range(nrOfCols):
differences.append((data[2][9][m] / data[1][9][m] - 1) * 100)
# write differences and ttest results
write_diff_ttest(htmldoc, differences, data[-1])
    # link to tabd, need inverse aggregation
if (filename == operation + '_BS'):
inverseFSBS = '_FS'
else:
inverseFSBS = '_BS'
hrefBaseline = operation + inverseFSBS + '_baseline.tsv'
hrefSet1 = operation + inverseFSBS + '_set1.tsv'
htmldoc.write('<div class=\"rawdata belowtable\">See raw data <a href=\"../'+tabdDir+'/'+hrefBaseline+'\">'+hrefBaseline+'</a>')
htmldoc.write(' and <a href=\"../'+tabdDir+'/'+hrefSet1+'\">'+hrefSet1+'</a>.</div>\n')
# dataXX[:-1] cuts data[3] where ttest results are stored, make_plot function cannot handle it itself
make_plot(operation_name(operation),data[0][0],'Operation speed [MB/s]',data[:-1],filename,semilogx=True,semilogy=False, type='normal')
return
# write Overall Summary plot and table
# data - both sets data in [[(FS, BSdata)],[(FS, BSdata)]] # [0] = base, [1] = set1 format
# operations - operations order for tab delimited
# regLines - Linear regression of all results
def write_overFSBS_summary(resultsForOp, operations, regLines):
htmldoc.write('<h3 id="summary">Overall summary</h3>')
htmldoc.write('<img src=\"summary.png\" alt=\"summary\" class="plot"/>\n')
htmldoc.write('<table>\n')
htmldoc.write('<tr>')
htmldoc.write('<td/><td>Operation</td>\n')
for operation in operations:
htmldoc.write('<td>'+operation_name(operation)+'</td>\n')
htmldoc.write('</tr>\n')
nrOfOps = len(resultsForOp[0].keys())
setNr = 0
# for both sets
for i in range(len(resultsForOp)):
setNr += 1
# compute statistics
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(resultsForOp[i], operations)
        # save some baseline values so they are not replaced by set1 values in the second iteration
if (i == 0):
setname = 'baseline'
meds_baseline = meds
frstQrts_baseline = frstQrts
thrdQrts_baseline = thrdQrts
minVals_baseline = minVals
maxVals_baseline = maxVals
else:
setname = 'set1'
htmldoc.write('<tr class=\"topline\">\n')
htmldoc.write('<td rowspan="10">'+setname+'</td><td>mean val.</td>\n')
for m in range(nrOfOps):
htmldoc.write('<td class=\"topline\">'+str(round(avgs[m],2))+'</td>\n')
htmldoc.write('</tr>\n')
# statistic vals
statVals = (devs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
        # value name for each data index
valNames = ('standard dev.', 'ci. min. 90%', 'ci. max 90%', 'geom. mean', 'median',
'first quartile', 'third quartile', 'minimum', 'maximum')
# write all statistic values
for v in range(len(statVals)):
htmldoc.write('<tr>\n')
htmldoc.write('<td>' + valNames[v] + '</td>\n')
for m in range(len(statVals[v])):
htmldoc.write('<td>'+str(round(statVals[v][m],2))+'</td>\n')
htmldoc.write('</tr>\n')
if (setNr == 1):
baseline_avgs = avgs
htmldoc.write('<tr class=\"bottomline topline\">\n')
htmldoc.write('<td colspan="2">linear regression slope 90%</td><td></td>\n')
# excluding operation ALL
for m in range(nrOfOps - 1):
(slope, std_err, ci_min, ci_max) = regLines[m]
htmldoc.write('<td>'+str(numpy.around(ci_min,2))+' - '+str(numpy.around(ci_max,2))+'</td>\n')
htmldoc.write('</tr>\n')
# compute and write differences and t-test values
differences = []
for m in range(nrOfOps):
differences.append((meds[m] / meds_baseline[m] - 1) * 100)
ttestInput=[[],[]]
for operation in operations:
ttestInput[0].append(resultsForOp[0][operation])
ttestInput[1].append(resultsForOp[1][operation])
ttestResults = ttest_equality(ttestInput)
write_diff_ttest(htmldoc, differences, ttestResults)
# link tab delimited
hrefSorted = 'summary_sorted_by_operation_'
htmldoc.write('<div class=\"rawdata belowtable\">See raw data <a href=\"../'+tabdDir+'/'+hrefSorted+'baseline.tsv\">'+hrefSorted+'baseline.tsv</a>')
htmldoc.write(' and <a href=\"../'+tabdDir+'/'+hrefSorted+'set1.tsv\">'+hrefSorted+'set1.tsv</a>.<br>')
htmldoc.write('All data are aggregated in <a href=\"../'+tabdDir+'/summary_all_baseline.tsv\">summary_all_baseline.tsv</a>.</div>')
# create the whiskers summary plot
    textstr = 'Plotted values are\n - (sample minimum)\n - lower quartile \n - median\n - upper quartile\n - (sample maximum)\nfor each datapoint.'
plt.clf()
width=0.35
x=numpy.arange(len(operations))
# baseline set1 bars one next another
x1=x+width/2
x2=x+1.5*width
fig = plt.figure()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*1.5, DefaultSize[1]))
ax = fig.add_subplot(111)
# whiskers
ax.errorbar(x1,meds_baseline,yerr=[numpy.array(meds_baseline) - numpy.array(minVals_baseline),numpy.array(maxVals_baseline) - numpy.array(meds_baseline)],color='red',linestyle='None',marker='None')
ax.errorbar(x2,meds,yerr=[numpy.array(meds) - numpy.array(minVals),numpy.array(maxVals) - numpy.array(meds)],color='black',linestyle='None',marker='None')
# baseline bars
rects1 = ax.bar(x,numpy.array(meds_baseline) - numpy.array(frstQrts_baseline),width,bottom=frstQrts_baseline,color='red')
ax.bar(x,numpy.array(thrdQrts_baseline) - numpy.array(meds_baseline),width,bottom=meds_baseline,color='red')
# set1 bars
rects2 = ax.bar(x+width,numpy.array(meds) - numpy.array(frstQrts),width,bottom=frstQrts,color='white')
ax.bar(x+width,numpy.array(thrdQrts) - numpy.array(meds),width,bottom=meds,color='white')
ax.set_ylabel('Operation speed [MB/s]')
ax.set_title('Summary sorted by operation')
ax.set_xticks(x+width)
opNames = []
# operations names on X axis
for op in operations:
opNames.append(operation_name(op))
ax.set_xticklabels(tuple(opNames), size=9)
# legend
font = FontProperties(size='small');
a = plt.legend((rects1[0], rects2[0]), ('Baseline', 'Set1'), loc=0, prop=font);
txt = matplotlib.offsetbox.TextArea(textstr, textprops=dict(size=7))
box = a._legend_box
box.get_children().append(txt)
box.set_figure(box.figure)
# Fedora 14 bug 562421 workaround
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning)
plt.savefig(out_dir+'/'+'summary')
plt.clf()
return
# Return full iozone documentation name of operation
# operation this script internal operation name(no spaces, usable in filename)
def operation_name(operation):
names={"iwrite":"Write", "rewrite":"Re-write", "iread":"Read",
"reread":"Re-read", "randrd":"Random\nread", "randwr":"Random\nwrite",
"bkwdrd":"Backwards\nread", "recrewr":"Record\nrewrite", "striderd":"Strided\nRead",
"fwrite":"Fwrite","frewrite":"Frewrite", "fread":"Fread", "freread":"Freread", "ALL":"ALL"}
return names[operation]
# write statistical values to the bottom lines of tab delimited output
# tabd - where to write
# statVals - list of values in specific order(see valNames var in this function)
def write_tabd_stats(tabd, statVals):
    # value name for each data index
valNames = ('mean val.', 'standard dev.', 'ci. min. 90%', 'ci. max 90%', 'geom. mean', 'median',
'first quartile', 'third quartile', 'minimum', 'maximum')
tabd.write('# Statistics (excluding 0 values)\n')
for v in range(len(statVals)):
tabd.write('#' + valNames[v])
for m in range(len(statVals[v])):
tabd.write('\t'+str(round(statVals[v][m],2)))
tabd.write('\n')
return
# write oocalc formulas to the end of tabd
# it's useful to verify that this script computes the same stat values OO does
# tabd - where to write
# dataEnd - line number of last dataline
def write_oocalc_formulas(tabd, dataEnd):
tabd.write('#OOCALC formulas\n')
tabd.write('#mean val.\t=AVERAGE(B5:B' + dataEnd + ')\n')
tabd.write('#standard dev.\t=STDEV(B5:B' + dataEnd + ')\n')
tabd.write('#ci. min. 90%\t=AVERAGE(B5:B' + dataEnd + ')-TINV(1/10;COUNT(B5:B' + dataEnd + ')-1)*STDEV(B5:B' + dataEnd + ')/SQRT(COUNT(B5:B' + dataEnd + '))\n')
tabd.write('#ci. max 90%\t=AVERAGE(B5:B' + dataEnd + ')+TINV(1/10;COUNT(B5:B' + dataEnd + ')-1)*STDEV(B5:B' + dataEnd + ')/SQRT(COUNT(B5:B' + dataEnd + '))\n')
tabd.write('#geom. mean\t=GEOMEAN(B5:B' + dataEnd + ')\n')
tabd.write('#median\t=MEDIAN(B5:B' + dataEnd + ')\n')
    tabd.write('#first quartile\t=QUARTILE(B5:B' + dataEnd + ';1)\n')
tabd.write('#third quartile\t=QUARTILE(B5:B' + dataEnd + ';3)\n')
tabd.write('#minimum\t=QUARTILE(B5:B' + dataEnd + ';0)\n')
tabd.write('#maximum\t=QUARTILE(B5:B' + dataEnd + ';4)\n')
# create tab delimited output for a single operation
# data - input in write_operation function format
# operation - name of operation being written
# fsbs - determines whether the data has FS or BS orientation - needed for file name
def tab_delimited(data, operation, fsbs):
for setNr in range(1, len(data) - 1):
if (setNr == 1):
setName = 'baseline'
else:
setName = 'set1'
tabdName = operation + '_' + fsbs + '_' + setName + '.tsv'
tabd = open(tabdDir+'/'+tabdName, 'w')
tabd.write('# ' + operation + ' throughput for any ' + fsbs + '. Open it with: LC_ALL=en_US.UTF-8\n')
tabd.write('# Read this file into Open Office with command oocalc <filename>\n')
tabd.write('# Read this file into language R with command data <- read.delim("<filename>",comment.char = "#")\n')
tabd.write(data[0][0])
for i in data[0][2]:
tabd.write('\t'+str(i))
tabd.write('\n')
# write the data
dataLineNr = 4
rowNr = 0
rowName = data[setNr][0]
for row in data[setNr][2]:
if (row[0] == rowName):
rowNr += 1
else:
rowName = row[0]
rowNr = 1
tabd.write(data[0][1] + ' = ' + str(row[0])+' Run='+str(rowNr))
for val in row[1:]:
if (val != 0):
val2write = str(round(val,2))
else:
val2write = ''
tabd.write('\t' + val2write)
tabd.write('\n')
dataLineNr += 1
# prepare data for stats
columnValues = [] # [[col1_row1, col1_row2, ...], [col2_row1, col2_row2, ...], ...]
for m in data[setNr][2][0][1:]:
columnValues.append([])
for row in data[setNr][2]:
for colNr in range(1, len(row)):
columnValues[colNr-1].append(row[colNr])
# compute statistics
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(columnValues)
# write all statistic values
statVals = (avgs, devs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
write_tabd_stats(tabd, statVals)
write_oocalc_formulas(tabd, str(dataLineNr))
tabd.close()
return
# create tab delimited output of the summary sorted by operation and the overall summary
# data - both sets data in [[(FS, BSdata)],[(FS, BSdata)]] # [0] = base, [1] = set1 format
# operations - operations order for tab delimited
def tab_delimited_summary(data, order):
for setNr in range(len(data)):
if (setNr == 0):
setName = 'baseline'
else:
setName = 'set1'
tabdOperationName = 'summary_sorted_by_operation' + '_' + setName + '.tsv'
tabdAllName = 'summary_all' + '_' + setName + '.tsv'
tabdOperation = open(tabdDir+'/'+tabdOperationName, 'w')
tabdAll = open(tabdDir+'/'+tabdAllName, 'w')
tabdOperation.write('# iozone measured throughput[MB/s] for any FS and any BS. Open it with: LC_ALL=en_US.UTF-8\n')
tabdOperation.write('# Read this file into Open Office with command oocalc <filename>\n')
tabdOperation.write('# Read this file into language R with command data <- read.delim("<filename>",comment.char = "#")\n')
tabdAll.write('# iozone measured throughput[MB/s] for any FS, any BS and any operation. Open it with: LC_ALL=en_US.UTF-8\n')
tabdAll.write('# Read this file into Open Office with command oocalc <filename>\n')
tabdAll.write('# Read this file into language R with command data <- read.delim("<filename>",comment.char = "#")\n')
tabdAll.write('ALL\n')
tabdOperation.write('Operation')
for operation in order:
tabdOperation.write('\t'+operation)
tabdOperation.write('\n')
# prepare data for stats
columnValues = [] # [[col1_row1, col1_row2, ...], [col2_row1, col2_row2, ...], ...]
allSetData = []
valsOp = {}
valsAll = {}
for m in order:
columnValues.append([])
# prepare data in format more usable for what will follow
for opNr in range(len(data[setNr])):
(x, y) = data[setNr][opNr]
Run = 0
FS = y[0][0]
for row in y:
if (row[0] == FS):
Run += 1
else:
FS = row[0]
Run = 1
for bsNr in range(len(x)):
valsOp[(FS, x[bsNr], Run, opNr)] = row[bsNr+1]
valsAll[(opNr, FS, x[bsNr], Run)] = row[bsNr+1]
columnValues[opNr].append(row[bsNr+1])
allSetData.append(row[bsNr+1])
        # write content of tabd sorted by operation
row = []
dataLineNr = 4
for key in sorted(valsOp.keys()):
(FS, BS, Run, opNr) = key
if (valsOp[key] != 0):
val = str(round(valsOp[key],2))
else:
val = ''
row.append(val)
if (opNr == 0):
caption = 'Filesize[kB] = ' + str(FS) + ' Block size [kB] = ' + str(BS) + ' Run=' + str(Run)
if (opNr == (len(order) - 1 )):
empty = True;
for val in row:
if val:
empty = False;
if not (empty):
tabdOperation.write(caption)
for val in row:
tabdOperation.write('\t' + val)
tabdOperation.write('\n')
dataLineNr += 1
row = []
# write overall tabd content
tabdAllLine = 4
for key in sorted(valsAll.keys()):
(opNr, FS, BS, Run) = key
if (valsAll[key] != 0):
tabdAll.write('Operation ' + order[opNr] + ' Filesize[kB] = ' + str(FS) + ' Block size [kB] = ' + str(BS) + ' Run = ' + str(Run) + '\t' + str(round(valsAll[key],2)) + '\n')
tabdAllLine += 1
# compute statistics
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(columnValues)
# write all statistic values to tabd sorted by operation
statValsOperation = (avgs, devs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
write_tabd_stats(tabdOperation, statValsOperation)
write_oocalc_formulas(tabdOperation, str(dataLineNr))
tabdOperation.close()
# write all statistic values to all data tabd
columnValues = [allSetData]
(avgs, devs, errs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals) = compute_all_stats(columnValues)
statValsAll = (avgs, devs, ci_mins, ci_maxes, gms, meds, frstQrts, thrdQrts, minVals, maxVals)
write_tabd_stats(tabdAll, statValsAll)
write_oocalc_formulas(tabdAll, str(tabdAllLine))
tabdAll.close()
return
# create visual comparisions of both sets plots and tab delimited
# data - both sets data in [[(FS, BSdata)],[(FS, BSdata)]] # [0] = base, [1] = set1 format
# operations - operations order for tab delimited
def xy_fsbs_avg(data, operations):
tabd = open(tabdDir+'/comparison_of_averages.tsv', 'w')
tabd.write('# comparison of averages for iozone measured throughput[MB/s] for any FS and any BS. Open it with: LC_ALL=en_US.UTF-8\n')
tabd.write('# Read this file into Open Office with command oocalc <filename>\n')
tabd.write('# Read this file into language R with command data <- read.delim("<filename>",comment.char = "#")\n')
tabd.write('Operation')
for operation in operations:
tabd.write('\t'+operation+' baseline\t'+operation+' set1')
tabd.write('\n')
vals = {}
    # this might look strange, but when we don't have to check whether a key exists for every value,
    # it saves a lot of runtime. Nicer code with a key check runs for 30 seconds, this one for four.
for setNr in range(len(data)):
for opNr in range(len(data[setNr])):
(x, y) = data[setNr][opNr]
Run = 0
FS = y[0][0]
for row in y:
if (row[0] == FS):
Run += 1
else:
FS = row[0]
Run = 1
for bsNr in range(len(x)):
vals[(FS, x[bsNr], opNr, setNr)] = []
    # now the keys are sure to exist, so place the values
for setNr in range(len(data)):
for opNr in range(len(data[setNr])):
(x, y) = data[setNr][opNr]
Run = 0
FS = y[0][0]
for row in y:
if (row[0] == FS):
Run += 1
else:
FS = row[0]
Run = 1
for bsNr in range(len(x)):
if (row[bsNr+1] != 0):
vals[(FS, x[bsNr], opNr, setNr)].append(row[bsNr+1])
# compute averages of vals lists
avgs = {}
for key in sorted(vals.keys()):
(FS, BS, opNr, setNr) = key
if (vals[(FS, BS, opNr, setNr)]):
avgs[(FS, BS, opNr, setNr)] = numpy.mean(vals[(FS, BS, opNr, setNr)])
# column values for operations
columnValues = []
for op in operations:
# two times - for both sets
columnValues.append([])
columnValues.append([])
# write tab delimited and fill columnValues list
row = []
for key in sorted(avgs.keys()):
(FS, BS, opNr, setNr) = key
val = avgs[(FS, BS, opNr, setNr)]
row.append(val)
if (opNr == 0 and setNr == 0):
caption = 'Filesize[kB] = ' + str(FS) + ' Block size [kB] = ' + str(BS)
if (opNr == (len(operations) - 1 ) and setNr == 1):
tabd.write(caption)
for val in row:
tabd.write('\t' + str(round(val,2)))
tabd.write('\n')
row = []
columnValues[2 * opNr + setNr].append(val)
# compute stats
slopes = []
std_errs = []
ci_mins = []
ci_maxes = []
for opNr in range(len(operations)):
(slope, std_err, ci_min, ci_max) = regline_slope(columnValues[2*opNr], columnValues[2*opNr+1])
slopes.append(slope)
std_errs.append(std_err)
ci_mins.append(ci_min)
ci_maxes.append(ci_max)
# create plots
for opNr in range(len(operations)):
name = operations[opNr] + '_compare'
baselineFasterX = []
baselineFasterY = []
set1FasterX = []
set1FasterY = []
# faster in baseline will be plotted in red, faster in set1 in black
for i in range(len(columnValues[2*opNr])):
if (columnValues[2*opNr][i] >= columnValues[2*opNr+1][i]):
baselineFasterX.append(columnValues[2*opNr][i])
baselineFasterY.append(columnValues[2*opNr+1][i])
else:
set1FasterX.append(columnValues[2*opNr][i])
set1FasterY.append(columnValues[2*opNr+1][i])
if (sorted(columnValues[2*opNr])[-1] > sorted(columnValues[2*opNr+1])[-1]):
max = sorted(columnValues[2*opNr])[-1]
else:
max = sorted(columnValues[2*opNr+1])[-1]
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111)
        # Workaround: Legend does not support <class 'matplotlib.collections.PolyCollection'>. This line is not actually visible
d = ax.plot([0, 1.05*max], [0, 1.05*max*((ci_maxes[opNr]+ci_mins[opNr])/2)], '-', color='pink')
a = ax.plot(baselineFasterX, baselineFasterY, 'r.')
b = ax.plot(set1FasterX, set1FasterY, 'k.')
# y = x line
c = ax.plot([0, 1.05*max], [0, 1.05*max], 'k-')
# ci_min line
ax.plot([0, 1.05*max], [0, 1.05*max*ci_mins[opNr]], 'r-')
# ci_max line
ax.plot([0, 1.05*max], [0, 1.05*max*ci_maxes[opNr]], 'r-')
# filling between ci_min and ci_max lines
ax.fill_between([0, 1.05*max], [0, 1.05*max*ci_mins[opNr]], [0, 1.05*max*ci_maxes[opNr]], color='pink')
plt.grid(True)
plt.xlabel('baseline throughput [MB/s]')
plt.ylabel('set1 throughput [MB/s]')
plt.title('Linear regression of all ' + operations[opNr] + ' values')
font = FontProperties(size='x-small');
leg = plt.legend((a, b, c, d), ('Faster in baseline', 'Faster in set1', 'y = x line', 'reg. line 90% conf. int.'), loc=0, prop=font);
# Fedora 14 bug 562421 workaround
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning)
plt.savefig(out_dir+'/'+name)
regLines = [] # regression lines info will be returned to be writted in form of tabs in __main__
tabd.write('# Slope of regression line')
for opNr in range(len(operations)):
tabd.write('\t' + str(round(slopes[opNr],5)) + '\t')
regLines.append((slopes[opNr], std_errs[opNr], ci_mins[opNr], ci_maxes[opNr]))
tabd.write('\n')
tabd.write('# Standard error of regression line slope')
for opNr in range(len(operations)):
tabd.write('\t' + str(round(std_errs[opNr],5)) + '\t')
tabd.write('\n')
tabd.write('# Regression line slope ci. min. 90%.')
for opNr in range(len(operations)):
tabd.write('\t' + str(round(ci_mins[opNr],5)) + '\t')
tabd.write('\n')
tabd.write('# Regression line slope ci. max 90%')
for opNr in range(len(operations)):
tabd.write('\t' + str(round(ci_maxes[opNr],5)) + '\t')
tabd.write('\n')
tabd.close()
return regLines
# compute slope of regression line
# Xvals - array of X-axis values
# Yvals - array of Y-axis values of the same length as Xvals
# return computed slope, standard error, confidence interval min and max
def regline_slope(Xvals, Yvals):
x = numpy.array(Xvals)
y = numpy.array(Yvals)
AverageX = numpy.mean(Xvals)
# slope a solves
# a^2 * Sum[xi yi] + a * Sum [xi^2 - yi^2] - Sum [xi yi] = 0
A = numpy.sum(x*y)
B = numpy.sum([x**2 - y**2])
discriminant = numpy.sqrt( B**2 + 4 * A**2)
a = ( -B + discriminant ) / ( 2 * A )
# distance of points from line with slope=a
D = numpy.abs(a*x-y) / numpy.sqrt(a**2 + 1)
# standard error of a
a_se = numpy.sqrt( numpy.sum(D**2) / numpy.sum((x - AverageX)**2) / (len(x) - 1) )
# 90% confidence interval
h = a_se * stats.t._ppf((1+0.90)/2., len(x)-1)
return (a, a_se, a-h, a+h)
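# Quick sanity check (hypothetical numbers, not part of the original script):
# regline_slope([1, 2, 3], [2, 4, 6]) returns (2.0, 0.0, 2.0, 2.0), since the points
# lie exactly on y = 2x and the orthogonal distances from the fitted line are all zero.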
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse iozone files')
parser.add_argument('--baseline', nargs='+',type=is_file_readable,required=True,
help='Set of iozone result files to form the baseline.')
parser.add_argument('--set1', nargs='+',type=is_file_readable,required=True,
help='Set of iozone results files to compared against baseline.')
parser.add_argument('--detail', nargs=1, required=False, action='store',
help='Name of operation to have a closer look at.')
args = parser.parse_args()
# parse input files
base=Parse_iozone(args.baseline)
set1=Parse_iozone(args.set1)
print 'Creating plots. This will take several seconds.'
# detail mode
if (args.detail != None):
operation = ''.join(args.detail)
detail(base, set1, operation)
sys.exit()
try:
shutil.rmtree('./'+out_dir)
shutil.rmtree('./'+tabdDir)
except:
pass
# prepare output directories
os.makedirs(out_dir)
os.makedirs(tabdDir)
shutil.copyfile('./stylesheet.css',out_dir+'/stylesheet.css')
htmldoc=open(out_dir+'/'+out_file,'w')
write_header(htmldoc, base.files, set1.files);
for operation in set1.operations:
htmldoc.write('<a href=\"#' + operation + '\">' + operation_name(operation) + '</a><br>\n')
htmldoc.write('<a href=\"#summary\">Summary</a><br><hr>\n')
# all results for operation for both sets for tab delimited
#resultsForOp = [[(FS, BSdata)],[(FS, BSdata)]] # [0] = base, [1] = set1
resultsForOp = [[],[]]
for operation in set1.operations:
(x1,y1)=base.get_BS_list_for_any_FS(operation)
(x2,y2)=set1.get_BS_list_for_any_FS(operation)
resultsForOp[0].append((x1, y1))
resultsForOp[1].append((x2, y2))
tab_delimited_summary(resultsForOp, set1.operations)
regLines = xy_fsbs_avg(resultsForOp, set1.operations)
opNr = 0
# write tables, tab delimited output and create plots for every operation
for operation in set1.operations:
# block size oriented tables and plots
data = []
data.append(['File size [kB]','Block size [kB]'])
data.append(['baseline','black'])
(x1,y1)=base.get_FS_list_for_any_BS(operation)
data[0].append(x1)
data[1].append(y1)
data.append(['set1','red'])
(x2,y2)=set1.get_FS_list_for_any_BS(operation)
assert(x1==x2)
data[2].append(y2)
htmldoc.write('<h3 id="' + operation + '">' + operation_name(operation) + '</h3>\n')
write_operation(htmldoc,operation,data,operation+'_FS')
tab_delimited(data, operation, 'BS')
# file size oriented tables and plots
data = []
data.append(['Block size [kB]','File size [kB]'])
data.append(['baseline','black'])
(x1,y1)=base.get_BS_list_for_any_FS(operation)
data[0].append(x1)
data[1].append(y1)
data.append(['set1','red'])
(x2,y2)=set1.get_BS_list_for_any_FS(operation)
assert(x1==x2)
data[2].append(y2)
write_operation(htmldoc,operation,data,operation+'_BS')
tab_delimited(data, operation, 'FS')
# visual comparision of both sets plot and table
        htmldoc.write('<img src=\"'+operation+'_compare.png\" alt=\"'+operation+'_compare" class="plot"/>\n')
(slope, std_err, ci_min, ci_max) = regLines[opNr]
htmldoc.write('<div class=\"rawdata abovetable\">See <a href=\"../'+tabdDir+'/comparison_of_averages.tsv\">comparison_of_averages.tsv</a>.</div>\n')
htmldoc.write('<table><tr><th colspan="2"> Regression line </th></tr>\n')
htmldoc.write('<tr class=\"topline\"><td> slope </td><td>' + str(round(slope,5)) + '</td></tr>\n')
htmldoc.write('<tr class=\"topline\"><td> std. error </td><td>' + str(round(std_err,5)) + '</td></tr>\n')
htmldoc.write('<tr class=\"topline bottomline\"><td> ci. max 90% </td><td>' + str(round(ci_min,5)) + '</td></tr>\n')
htmldoc.write('<tr><td> ci. min. 90% </td><td>' + str(round(ci_max,5)) + '</td></tr></table>\n')
opNr += 1
htmldoc.write('<a href="#top">Back on top</a>\n')
htmldoc.write('<hr>\n')
# all results for operation for both sets for html
resultsForOp = [{},{}] # [0] = base, [1] = set1; dict key is op name
for operation in set1.operations:
y1 = base.get_all_for_operation(operation)
resultsForOp[0][operation] = y1
y2 = set1.get_all_for_operation(operation)
resultsForOp[1][operation] = y2
# results for all blocksizes and filesizes for one set, needed for summary at the end
allSetData = [] # [0] = base, [1] = set1
allSetData.append(base.get_all_data_list())
allSetData.append(set1.get_all_data_list())
resultsForOp[0]['ALL'] = allSetData[0]
resultsForOp[1]['ALL'] = allSetData[1]
write_overFSBS_summary(resultsForOp, ['ALL'] + set1.operations, regLines)
htmldoc.write('<a href="#top">Back on top</a>\n')
write_footer(htmldoc)
htmldoc.close()
print 'Finished.\nTo view results open in your web browser:'
print 'file://' + os.getcwd() + '/Compare-html/index.html'
| gpl-3.0 |
3drobotics/MAVProxy | MAVProxy/modules/lib/live_graph.py | 4 | 8385 | #!/usr/bin/env python
"""
MAVProxy realtime graphing module, partly based on the wx graphing
demo by Eli Bendersky (eliben@gmail.com)
http://eli.thegreenplace.net/files/prog_code/wx_mpl_dynamic_graph.py.txt
"""
from MAVProxy.modules.lib import mp_util
class LiveGraph():
'''
a live graph object using wx and matplotlib
All of the GUI work is done in a child process to provide some insulation
from the parent mavproxy instance and prevent instability in the GCS
New data is sent to the LiveGraph instance via a pipe
'''
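# Usage sketch (illustrative only; the field names and value sources below are
# assumptions, not part of MAVProxy -- see also the test in __main__ below):
#
#   graph = LiveGraph(['roll', 'pitch'], timespan=30.0)
#   while graph.is_alive():
#       graph.add_values([roll, pitch])  # one value per field, in field order
#   graph.close()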
def __init__(self,
fields,
title='MAVProxy: LiveGraph',
timespan=20.0,
tickresolution=0.2,
colors=[ 'red', 'green', 'blue', 'orange', 'olive', 'cyan', 'magenta', 'brown', 'darkgreen',
'violet', 'purple', 'grey', 'black']):
import multiprocessing
self.fields = fields
self.colors = colors
self.title = title
self.timespan = timespan
self.tickresolution = tickresolution
self.values = [None]*len(self.fields)
self.parent_pipe,self.child_pipe = multiprocessing.Pipe()
self.close_graph = multiprocessing.Event()
self.close_graph.clear()
self.child = multiprocessing.Process(target=self.child_task)
self.child.start()
def child_task(self):
'''child process - this holds all the GUI elements'''
mp_util.child_close_fds()
import wx, matplotlib
matplotlib.use('WXAgg')
app = wx.PySimpleApp()
app.frame = GraphFrame(state=self)
app.frame.Show()
app.MainLoop()
def add_values(self, values):
'''add some data to the graph'''
if self.child.is_alive():
self.parent_pipe.send(values)
def close(self):
'''close the graph'''
self.close_graph.set()
if self.is_alive():
self.child.join(2)
def is_alive(self):
'''check if graph is still going'''
return self.child.is_alive()
import wx
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
def __init__(self, state):
wx.Frame.__init__(self, None, -1, state.title)
self.state = state
self.data = []
for i in range(len(state.fields)):
self.data.append([])
self.paused = False
self.create_main_panel()
self.Bind(wx.EVT_IDLE, self.on_idle)
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(1000*self.state.tickresolution)
def create_main_panel(self):
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.close_button = wx.Button(self.panel, -1, "Close")
self.Bind(wx.EVT_BUTTON, self.on_close_button, self.close_button)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.close_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(1)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def init_plot(self):
self.dpi = 100
import pylab, numpy
from matplotlib.figure import Figure
self.fig = Figure((6.0, 3.0), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('white')
pylab.setp(self.axes.get_xticklabels(), fontsize=8)
pylab.setp(self.axes.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_data = []
if len(self.data[0]) == 0:
max_y = min_y = 0
else:
max_y = min_y = self.data[0][0]
for i in range(len(self.data)):
p = self.axes.plot(
self.data[i],
linewidth=1,
color=self.state.colors[i],
label=self.state.fields[i],
)[0]
self.plot_data.append(p)
if len(self.data[i]) != 0:
min_y = min(min_y, min(self.data[i]))
max_y = max(max_y, max(self.data[i]))
# create X data
self.xdata = numpy.arange(-self.state.timespan, 0, self.state.tickresolution)
self.axes.set_xbound(lower=self.xdata[0], upper=0)
if min_y == max_y:
self.axes.set_ybound(min_y, max_y+0.1)
self.axes.legend(self.state.fields, loc='upper left', bbox_to_anchor=(0, 1.1))
def draw_plot(self):
""" Redraws the plot
"""
import numpy, pylab
state = self.state
if len(self.data[0]) == 0:
print("no data to plot")
return
vhigh = max(self.data[0])
vlow = min(self.data[0])
for i in range(1,len(self.plot_data)):
vhigh = max(vhigh, max(self.data[i]))
vlow = min(vlow, min(self.data[i]))
ymin = vlow - 0.05*(vhigh-vlow)
ymax = vhigh + 0.05*(vhigh-vlow)
if ymin == ymax:
ymax = ymin + 0.1
ymin = ymin - 0.1
self.axes.set_ybound(lower=ymin, upper=ymax)
self.axes.grid(True, color='gray')
pylab.setp(self.axes.get_xticklabels(), visible=True)
pylab.setp(self.axes.get_legend().get_texts(), fontsize='small')
for i in range(len(self.plot_data)):
ydata = numpy.array(self.data[i])
xdata = self.xdata
if len(ydata) < len(self.xdata):
xdata = xdata[-len(ydata):]
self.plot_data[i].set_xdata(xdata)
self.plot_data[i].set_ydata(ydata)
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_close_button(self, event):
self.redraw_timer.Stop()
self.Destroy()
def on_idle(self, event):
import time
time.sleep(self.state.tickresolution*0.5)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
state = self.state
if state.close_graph.wait(0.001):
self.redraw_timer.Stop()
self.Destroy()
return
while state.child_pipe.poll():
state.values = state.child_pipe.recv()
if self.paused:
return
for i in range(len(self.plot_data)):
if state.values[i] is not None:
self.data[i].append(state.values[i])
while len(self.data[i]) > len(self.xdata):
self.data[i].pop(0)
for i in range(len(self.plot_data)):
if state.values[i] is None or len(self.data[i]) < 2:
return
self.draw_plot()
if __name__ == "__main__":
# test the graph
import time, math
livegraph = LiveGraph(['sin(t)', 'cos(t)', 'sin(t+1)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)', 'x'],
timespan=30,
title='Graph Test')
while livegraph.is_alive():
t = time.time()
livegraph.add_values([math.sin(t), math.cos(t),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+2), math.cos(t+2)])
time.sleep(0.05)
| gpl-3.0 |
idiap/zentas | python/experiments/skl_eak_zen.py | 1 | 5111 | # Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by James Newling <jnewling@idiap.ch>
"""
experiments comparing:
- scikit-learn,
- eakmeans and
- zentas.
"""
import matplotlib.pyplot as pl
import sys
import random
import numpy as np
import numpy.random as npr
#where is pyzentas.so ? Make sure this is correct.
sys.path.append("../../build/python")
import pyzentas
from IPython.core.debugger import Tracer
import time
import rna
reload(rna)
def go(X, K, withskl, witheak, withzen):
"""
X : data
K : number of clusters
withskl, witheak, withzen : bools indicating whether to run with em.
"""
indices_init = np.arange(K, dtype = np.uint64)
C_init = X[indices_init]
results = {}
if withskl == True:
results["skl"] = {}
from sklearn.cluster import KMeans
# run until convergence, initialise with scikit-learn's special version of k-means++ (see zentas wiki entry for discussion).
sklc = KMeans(n_clusters = K, init = "k-means++", max_iter = 100000000, tol = 1e-20, verbose = 0, n_init = 1)
tsk0 = time.time()
sklc.fit(X)
tsk1 = time.time()
sklacc = np.sum(np.min(np.sum((np.expand_dims(X, axis = 1) - np.expand_dims(sklc.cluster_centers_, axis = 0))**2, axis = 2), axis = 1)) / X.shape[0]
results["skl"]["t"] = tsk1 - tsk0
results["skl"]["mse"] = sklacc
if witheak:
results["eak"] = {}
sys.path.append(datapaths.datapath["eaklibdir"])
import kmeans
teak0 = time.time()
eak = kmeans.get_clustering(X, K, verbose = 1, init = "kmeans++", n_threads = 4)
teak1 = time.time()
results["eak"]["t"] = teak1 - teak0
results["eak"]['mse'] = eak["mse"]
if withzen:
results["zen"] = {}
# run with zentas. pipeline here is (1) kmeans++ (2) clarans (3) lloyd.
z = pyzentas.pyzen(K = K, metric = 'l2', energy = 'quadratic', max_itok = 10.0, max_time = 5.0, max_proposals = K**2, seed = npr.randint(1000), patient = True, nthreads = 4, init = "kmeans++-4", with_tests = False, capture_output = True, rooted = False)
tzen0 = time.time()
tangerine = z.den(X, do_vdimap = True, do_refinement = True, rf_max_rounds = 10000000)
tzen1 = time.time()
results["zen"]["t"] = tzen0 - tzen1
results["zen"]["out"] = pyzentas.get_processed_output(tangerine['output'])
results["zen"]['mse'] = results["zen"]["out"]["mE"][-1]
return results
def experiment1():
#set the experiment settings here :
withskl = True
witheak = False
withzen = True
K = 200
ndata = 30*K
seed = 107
nruns = 10
npr.seed(seed)
dataset = "rna" #"random" # or "rna" or "mnist".
if dataset == "mnist":
import mnist
reload(mnist)
X = mnist.read_MNIST(dataset = "original", ndata = ndata)/1000.
elif dataset == "rna":
X = rna.get_rna()[0:ndata, 2::]
X += 0.001*npr.randn(X.shape[0], X.shape[1])
npr.seed()
elif dataset == "random":
X = npr.randn(ndata, 2)
else:
raise RuntimeError("unrecognised dataset string")
for i in range(nruns):
npr.seed()
results = go(X, K, withskl, witheak, withzen)
if withskl:
label = "scikit-learn" if i == 0 else None
pl.plot(results["skl"]["t"], results["skl"]["mse"], marker = "o", color = 'k', markersize = 15, label = label)
if witheak:
label = "eakmeans" if i == 0 else None
pl.plot(results["eak"]["t"], results["eak"]["mse"], marker = "x", color = 'k', markersize = 15, label = label)
if withzen:
pl.plot(results["zen"]["out"]["Tt"]/1000., results["zen"]["out"]["mE"], color = 'k', marker = "+", markersize = 2, linestyle = "none")
label = "zentas" if i == 0 else None
pl.plot(results["zen"]["out"]["Tt"][-1]/1000., results["zen"]["out"]["mE"][-1], color = 'k', marker = "+", markersize = 15, linestyle = "none", label = label)
pl.ion()
pl.figure(1)
pl.legend()
pl.show()
def sklearn_elkan():
"""
a scikit-learn inconsistency due to numerical rounding
"""
import numpy.random as npr
from sklearn.cluster import KMeans
seed = 51220
npr.seed(seed)
N = 1200
K = 100
X = npr.randn(N, 2)**7
indices_init = np.arange(K, dtype = np.uint64)
C_init = X[indices_init]
for alg in ["elkan", "full"]:
sklc = KMeans(n_clusters = K, init = C_init, max_iter = int(1e6), tol = 1e-20, verbose = 0, n_init = 1, algorithm = alg)
sklc.fit(X)
print "final E with algorithm ", alg, "\t : \t", np.sum(np.min(np.sum((np.expand_dims(X, axis = 1) - np.expand_dims(sklc.cluster_centers_, axis = 0))**2, axis = 2), axis = 1)) / X.shape[0]
def mnist_example():
"""
Showing how do_vdimap can help.
"""
import mnist
reload(mnist)
ndata = int(1e3)
X = mnist.read_MNIST(dataset = "original", ndata = ndata)/1000.
dimension = X[0].shape[-1]
npr.seed(1000)
z = pyzentas.pyzen(K = 100, metric = 'l2', energy = 'quadratic', exponent_coeff = 0, max_itok = 10000, max_rounds = 20, seed = 1011, nthreads = 1, init = "kmeans++-5", with_tests = False, patient = False)
do_vdimap = True
tangerine = z.den(X, do_vdimap, do_refinement = True)
| gpl-3.0 |
bigdataelephants/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
brev/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qtagg.py | 73 | 4972 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib import verbose
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt import qt, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQTAgg( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasAgg, FigureCanvasQT ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.pixmap = qt.QPixmap()
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
# False in repaint does not clear the image before repainting
self.repaint( False )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
p = qt.QPainter( self )
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw( self )
#stringBuffer = str( self.buffer_rgba(0,0) )
# matplotlib is in rgba byte order.
# qImage wants to put the bytes into argb format and
# is in a 4 byte unsigned int. little endian system is LSB first
# and expects the bytes in reverse order (bgra).
if ( qt.QImage.systemByteOrder() == qt.QImage.LittleEndian ):
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = qt.QImage( stringBuffer, self.renderer.width,
self.renderer.height, 32, None, 0,
qt.QImage.IgnoreEndian )
self.pixmap.convertFromImage( qImage, qt.QPixmap.Color )
p.drawPixmap( qt.QPoint( 0, 0 ), self.pixmap )
# draw the zoom rectangle to the QPainter
if ( self.drawRect ):
p.setPen( qt.QPen( qt.Qt.black, 1, qt.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = qt.QImage(stringBuffer, w, h, 32, None, 0, qt.QImage.IgnoreEndian)
self.pixmap.convertFromImage(qImage, qt.QPixmap.Color)
p.drawPixmap(qt.QPoint(l, self.renderer.height-t), self.pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.repaint(False)
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
self.repaint(False)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
| agpl-3.0 |
vasudevk/sklearn_pycon2015 | notebooks/fig_code/ML_flow_chart.py | 61 | 4970 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/offsetbox.py | 11 | 53384 | """
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Given a list of (width, xdescent) of each box, calculate the
total width and the x-offset positions of each item according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) of each box, align the boxes
with *align* and calculate the y-offsets of each box, along with
the total height and descent of the aligned boxes.
*hd_list* : list of (height, ydescent) of boxes to be aligned.
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
# Clipping has not been implemented in the OffesetBox family, so
# disable the clip flag for consistency. It can always be turned back
# on to zero effect.
self.set_clip_on(False)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from .cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from .cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
accepts extent of the box
"""
if six.callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes. Can be one of ``top``, ``bottom``,
``left``, ``right``, ``center`` and ``baseline``
mode : str, optional
Packing mode.
Notes
-----
*pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes.
mode : str, optional
Packing mode.
Notes
-----
*pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
update offsets of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str
Alignment of boxes.
mode : str
Packing mode.
Notes
-----
*pad* and *sep* need to be given in points and will be scaled with
the renderer dpi, while *width* and *height* need to be in
pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
height = self.height - 2 * pad # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
*pad* needs to be given in points and will be
scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, # self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
update offsets of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
The TextArea contains a single Text instance. The text is
placed at (0,0) with baseline+left alignment. The width and height
of the TextArea instance are the width and height of its child
text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
Parameters
----------
s : str
a string to be displayed.
textprops : `~matplotlib.font_manager.FontProperties`, optional
multilinebaseline : bool, optional
If `True`, baseline for multiline text is adjusted so that
it is (approximately) center-aligned with singleline
text.
minimumdescent : bool, optional
If `True`, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform +
self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"set text"
self._text.set_text(s)
def get_text(self):
"get text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
Set multilinebaseline.
If True, baseline for multiline text is
adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info, d = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AuxTransformBox(OffsetBox):
"""
Offset Box with an aux_transform. Its children will be
transformed with the aux_transform first, then offset. The
absolute coordinate of the aux_transform is meaningless,
as it will be automatically adjusted so that the lower-left corner
of the bounding box of children will be set to (0,0) before the
offset transform.
It is similar to drawing area, except that the extent of the box
is not predetermined but calculated from the window extent of its
children. Furthermore, the extent of the children will be
calculated in the transformed coordinate.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
# ref_offset_transform is used to make the offset_transform
# always reference the lower-left corner of the bbox of its
# children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
# adjust ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
# restore offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
loc. AnchoredOffsetbox has a single child. When multiple children
are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
pad : pad around the child for drawing a frame. given in
fraction of fontsize.
borderpad : pad between offsetbox frame and the bbox_to_anchor,
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist. The extent of the child
added with the pad is returned
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored to
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the child will be anchored to.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(
self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible():
return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text.
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
Parameters
----------
s : string
Text.
loc : str
Location code.
pad : float, optional
Pad between the text and the frame as fraction of the font
size.
borderpad : float, optional
Pad between the frame and the axes (or *bbox_to_anchor*).
prop : `matplotlib.font_manager.FontProperties`
Font properties.
Notes
-----
Other keyword parameters of `AnchoredOffsetbox` are also
allowed.
"""
if prop is None:
prop = {}
propkeys = list(six.iterkeys(prop))
badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
if set(badkwargs) & set(propkeys):
warnings.warn("Mixing horizontalalignment or verticalalignment "
"with AnchoredText is not supported.")
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
OffsetBox.__init__(self)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y coordinate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# FIXME dpi_cor is never used
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx * zoom, ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if xybox is None:
self.xybox = xy
else:
self.xybox = xybox
if boxcoords is None:
self.boxcoords = xycoords
else:
self.boxcoords = boxcoords
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
martist.Artist.__init__(self, **kwargs)
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
@property
def xyann(self):
return self.xybox
@xyann.setter
def xyann(self, xyann):
self.xybox = xyann
@property
def anncoords(self):
return self.boxcoords
@anncoords.setter
def anncoords(self, coords):
self.boxcoords = coords
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"""
Update the pixel positions of the annotated point and the text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"""
Update the pixel positions of the annotation text and the arrow
patch.
"""
x, y = self.xybox
if isinstance(self.boxcoords, tuple):
xcoord, ycoord = self.boxcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
class DraggableBase(object):
"""
helper code for a draggable artist (legend, offsetbox)
    The derived class must override the following two methods.
        def save_offset(self):
            pass
        def update_offset(self, dx, dy):
            pass
    *save_offset* is called when the object is picked for dragging and is
    meant to save the reference position of the artist.
    *update_offset* is called during the dragging. dx and dy are the pixel
    offsets from the point where the mouse drag started.
    Optionally you may override the following two methods.
        def artist_picker(self, artist, evt):
            return self.ref_artist.contains(evt)
        def finalize_offset(self):
            pass
    *artist_picker* is the picker method that will be used.
    *finalize_offset* is called when the mouse is released. In the
    current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* simply places the artists in display
    coordinates, and *finalize_offset* recalculates their position in
    normalized axes coordinates and sets a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self.canvas = self.ref_artist.figure.canvas
self._use_blit = use_blit and self.canvas.supports_blit
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
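                # With blitting, the unchanged canvas background is cached once
                # at pick time; during the drag only the animated artist is
                # redrawn on top of it (see on_motion_blit).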
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(
self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
"""disconnect the callbacks"""
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xyann
if isinstance(ann.anncoords, tuple):
xcoord, ycoord = ann.anncoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.anncoords)
self.ox, self.oy = ox0, oy0
self.annotation.anncoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xyann = self.ox + dx, self.oy + dy
x, y = ann.xyann
def finalize_offset(self):
loc_in_canvas = self.annotation.xyann
self.annotation.anncoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted()
pos_axes_fraction = pos_axes_fraction.transform_point(loc_in_canvas)
self.annotation.xyann = tuple(pos_axes_fraction)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
| mit |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/registration/landmarks.py | 1 | 14660 | #!/usr/bin/env python
#########################################################################################
#
# This file contains an implementation of the iterative closest point algorithm.
# This algo registers two sets of points (3D coordinates) together.
#
# Adapted from:
# http://stackoverflow.com/questions/20120384/iterative-closest-point-icp-implementation-on-python
#
# NOTES ON ITK Transform Files:
# http://www.neuro.polymtl.ca/tips_and_tricks/how_to_use_ants#itk_transform_file
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Benjamin De Leener, Julien Cohen-Adad
# Created: 2015-06-10
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: homogeneize input parameters: (src=src, dest=dest), instead of (dest, src).
# TODO: add full affine transfo
# TODO: normalize SSE: currently, it depends on the number of landmarks
import sys, io, os
import logging
from operator import itemgetter
from nibabel import load
import numpy as np
from scipy.optimize import minimize
from spinalcordtoolbox.image import Image
logger = logging.getLogger(__name__)
sse_results = []
ini_param_rotation = 0.5 # rad
ini_param_trans_x = 270.0 # pix
ini_param_trans_y = -150.0 # pix
initial_step = 2
def register_landmarks(fname_src, fname_dest, dof, fname_affine='affine.txt', verbose=1):
"""
Register two NIFTI volumes containing landmarks
:param fname_src: fname of source landmarks
:param fname_dest: fname of destination landmarks
:param dof: degree of freedom. Separate with "_". Example: Tx_Ty_Tz_Rx_Ry_Sz
:param fname_affine: output affine transformation
:param verbose: 0, 1, 2
:return:
"""
# open src label
im_src = Image(fname_src)
# coord_src = im_src.getNonZeroCoordinates(sorting='value') # landmarks are sorted by value
coord_src = im_src.getCoordinatesAveragedByValue() # landmarks are sorted by value
# open dest labels
im_dest = Image(fname_dest)
# coord_dest = im_dest.getNonZeroCoordinates(sorting='value')
coord_dest = im_dest.getCoordinatesAveragedByValue()
# Reorganize landmarks
points_src, points_dest = [], []
for coord in coord_src:
point_src = im_src.transfo_pix2phys([[coord.x, coord.y, coord.z]])
# convert NIFTI to ITK world coordinate
# points_src.append([point_src[0][0], point_src[0][1], point_src[0][2]])
points_src.append([-point_src[0][0], -point_src[0][1], point_src[0][2]])
for coord in coord_dest:
point_dest = im_dest.transfo_pix2phys([[coord.x, coord.y, coord.z]])
# convert NIFTI to ITK world coordinate
# points_dest.append([point_dest[0][0], point_dest[0][1], point_dest[0][2]])
points_dest.append([-point_dest[0][0], -point_dest[0][1], point_dest[0][2]])
# display
logger.info(f"Labels src: {str(points_src)}")
logger.info(f"Labels dest: {str(points_dest)}")
logger.info(f"Degrees of freedom (dof): {dof}")
if len(coord_src) != len(coord_dest):
raise Exception('Error: number of source and destination landmarks are not the same, so landmarks cannot be paired.')
# estimate transformation
# N.B. points_src and points_dest are inverted below, because ITK uses inverted transformation matrices, i.e., src->dest is defined in dest instead of src.
# (rotation_matrix, translation_array, points_moving_reg, points_moving_barycenter) = getRigidTransformFromLandmarks(points_dest, points_src, constraints=dof, verbose=verbose, path_qc=path_qc)
(rotation_matrix, translation_array, points_moving_reg, points_moving_barycenter) = getRigidTransformFromLandmarks(points_src, points_dest, constraints=dof, verbose=verbose)
# writing rigid transformation file
# N.B. x and y dimensions have a negative sign to ensure compatibility between Python and ITK transfo
text_file = open(fname_affine, 'w')
text_file.write("#Insight Transform File V1.0\n")
text_file.write("#Transform 0\n")
text_file.write("Transform: AffineTransform_double_3_3\n")
text_file.write("Parameters: %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f\n" % (
rotation_matrix[0, 0], rotation_matrix[0, 1], rotation_matrix[0, 2],
rotation_matrix[1, 0], rotation_matrix[1, 1], rotation_matrix[1, 2],
rotation_matrix[2, 0], rotation_matrix[2, 1], rotation_matrix[2, 2],
translation_array[0, 0], translation_array[0, 1], translation_array[0, 2]))
text_file.write("FixedParameters: %.9f %.9f %.9f\n" % (points_moving_barycenter[0],
points_moving_barycenter[1],
points_moving_barycenter[2]))
text_file.close()
def getNeighbors(point, set_points, k=1):
    """
    Locate the k most similar neighbors of a point.
    :param point: the point for which we want to compute the neighbors
    :param set_points: list of other Points
    :param k: number of neighbors wanted
    :return: k nearest neighbors of input point
    """
distances = []
for other_point in set_points:
dist = point.euclideanDistance(other_point)
distances.append((other_point, dist))
distances.sort(key=itemgetter(1))
return [distances[x][0] for x in range(k)]
def SSE(pointsA, pointsB):
"""
Compute sum of squared error between pair-wise landmarks
:param pointsA:
:param pointsB:
:return:
"""
return np.sum(np.array(pointsA[:, 0:3] - pointsB[:, 0:3])**2.0)
def real_optimization_parameters(param_from_optimizer, initial_param=0, initial_step=10):
    # The initial step of the Nelder-Mead algorithm is (initial_param * 5e-2), which is too small
    # when we want initial_param = 30 pix and a step of 5 or 10.
    # This function allows choosing the scale of the steps after the first movement.
step_factor = float(initial_step) / float(initial_param * 5e-2)
real_param = initial_param + (param_from_optimizer - initial_param) * step_factor
return real_param
def Metric_Images(imageA, imageB, type=''):
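    """
    Compute a similarity metric between the intensities of two NIfTI images.
    'MeanSquares' returns a mean squared intensity difference; note that, as
    implemented, 'Correlation' returns a mean absolute difference.
    """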
data_A_list = load(imageA).get_data().tolist()
data_B_list = load(imageB).get_data().tolist()
# Define both list of intensity
list_A = []
list_B = []
for i in range(len(data_A_list)):
list_A = list_A + data_A_list[i]
list_B = list_B + data_B_list[i]
# Calculate metric depending on the type
if type == 'MeanSquares':
result_metric = 1.0 / (len(list_A)) * np.sum(np.array([list_A[i][0] - list_B[i][0] for i in range(len(list_A))])**2)
#result_metric = 1/(len(list_A)) * np.sum(np.array(list_A - list_B)**2)
if type == 'Correlation':
result_metric = 1.0 / (len(list_A)) * np.sum(np.absolute(np.array([list_A[i][0] - list_B[i][0] for i in range(len(list_A))])))
if type == 'MI':
logger.info(f"\nto do: MI")
# Return results
logger.info(f"\nResult of metric is: {str(result_metric)}")
return result_metric
def minimize_transform(params, points_dest, points_src, constraints):
"""
Cost function to minimize
:param params:
:param points_dest:
:param points_src:
:param constraints:
:return: sum of squared error between pair-wise landmarks
"""
# initialize dof
dof = [0, 0, 0, 0, 0, 0, 1, 1, 1]
# initialize dictionary to relate constraints index to dof
dict_dof = {'Tx': 0, 'Ty': 1, 'Tz': 2, 'Rx': 3, 'Ry': 4, 'Rz': 5, 'Sx': 6, 'Sy': 7, 'Sz': 8}
# extract constraints
list_constraints = constraints.split('_')
# loop across constraints and update dof
for i in range(len(list_constraints)):
dof[dict_dof[list_constraints[i]]] = params[i]
# convert dof to more intuitive variables
tx, ty, tz, alpha, beta, gamma, scx, scy, scz = dof[0], dof[1], dof[2], dof[3], dof[4], dof[5], dof[6], dof[7], dof[8]
# build rotation matrix
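    # (Euler angle composition: R = Rz(alpha) * Ry(beta) * Rx(gamma))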
rotation_matrix = np.matrix([[np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta) * np.sin(gamma) - np.sin(alpha) * np.cos(gamma), np.cos(alpha) * np.sin(beta) * np.cos(gamma) + np.sin(alpha) * np.sin(gamma)],
[np.sin(alpha) * np.cos(beta), np.sin(alpha) * np.sin(beta) * np.sin(gamma) + np.cos(alpha) * np.cos(gamma), np.sin(alpha) * np.sin(beta) * np.cos(gamma) - np.cos(alpha) * np.sin(gamma)],
[-np.sin(beta), np.cos(beta) * np.sin(gamma), np.cos(beta) * np.cos(gamma)]])
# build scaling matrix
scaling_matrix = np.matrix([[scx, 0.0, 0.0], [0.0, scy, 0.0], [0.0, 0.0, scz]])
# compute rotation+scaling matrix
rotsc_matrix = scaling_matrix * rotation_matrix
# compute center of mass from moving points (src)
points_src_barycenter = np.mean(points_src, axis=0)
# apply transformation to moving points (src)
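    # i.e. rotate and scale about the barycenter of the moving points, then translate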
points_src_reg = ((rotsc_matrix * (np.matrix(points_src) - points_src_barycenter).T).T + points_src_barycenter) + np.matrix([tx, ty, tz])
# record SSE for later display
sse_results.append(SSE(np.matrix(points_dest), points_src_reg))
# return SSE
return SSE(np.matrix(points_dest), points_src_reg)
def getRigidTransformFromLandmarks(points_dest, points_src, constraints='Tx_Ty_Tz_Rx_Ry_Rz', verbose=0):
"""
Compute affine transformation to register landmarks
    :param points_dest:
    :param points_src:
:param constraints:
:param verbose: 0, 1, 2
:return: rotsc_matrix, translation_array, points_src_reg, points_src_barycenter
"""
# TODO: check input constraints
# initialize default parameters
init_param = [0, 0, 0, 0, 0, 0, 1, 1, 1]
# initialize parameters for optimizer
init_param_optimizer = []
# initialize dictionary to relate constraints index to dof
dict_dof = {'Tx': 0, 'Ty': 1, 'Tz': 2, 'Rx': 3, 'Ry': 4, 'Rz': 5, 'Sx': 6, 'Sy': 7, 'Sz': 8}
# extract constraints
list_constraints = constraints.split('_')
# loop across constraints and build initial_parameters
for i in range(len(list_constraints)):
init_param_optimizer.append(init_param[dict_dof[list_constraints[i]]])
# launch optimizer
# res = minimize(minimize_transform, x0=init_param_optimizer, args=(points_src, points_dest, constraints), method='Nelder-Mead', tol=1e-8, options={'xtol': 1e-8, 'ftol': 1e-8, 'maxiter': 10000, 'maxfev': 10000, 'disp': show})
res = minimize(minimize_transform, x0=init_param_optimizer, args=(points_dest, points_src, constraints), method='Powell', tol=1e-8, options={'xtol': 1e-8, 'ftol': 1e-8, 'maxiter': 100000, 'maxfev': 100000, 'disp': verbose})
# res = minimize(minAffineTransform, x0=initial_parameters, args=points, method='COBYLA', tol=1e-8, options={'tol': 1e-8, 'rhobeg': 0.1, 'maxiter': 100000, 'catol': 0, 'disp': show})
# loop across constraints and update dof
dof = init_param
for i in range(len(list_constraints)):
dof[dict_dof[list_constraints[i]]] = res.x[i]
# convert dof to more intuitive variables
tx, ty, tz, alpha, beta, gamma, scx, scy, scz = dof[0], dof[1], dof[2], dof[3], dof[4], dof[5], dof[6], dof[7], dof[8]
# convert results to intuitive variables
# tx, ty, tz, alpha, beta, gamma, scx, scy, scz = res.x[0], res.x[1], res.x[2], res.x[3], res.x[4], res.x[5], res.x[6], res.x[7], res.x[8]
# build translation matrix
translation_array = np.matrix([tx, ty, tz])
# build rotation matrix
rotation_matrix = np.matrix([[np.cos(alpha) * np.cos(beta), np.cos(alpha) * np.sin(beta) * np.sin(gamma) - np.sin(alpha) * np.cos(gamma), np.cos(alpha) * np.sin(beta) * np.cos(gamma) + np.sin(alpha) * np.sin(gamma)],
[np.sin(alpha) * np.cos(beta), np.sin(alpha) * np.sin(beta) * np.sin(gamma) + np.cos(alpha) * np.cos(gamma), np.sin(alpha) * np.sin(beta) * np.cos(gamma) - np.cos(alpha) * np.sin(gamma)],
[-np.sin(beta), np.cos(beta) * np.sin(gamma), np.cos(beta) * np.cos(gamma)]])
# build scaling matrix
scaling_matrix = np.matrix([[scx, 0.0, 0.0], [0.0, scy, 0.0], [0.0, 0.0, scz]])
# compute rotation+scaling matrix
rotsc_matrix = scaling_matrix * rotation_matrix
# compute center of mass from moving points (src)
points_src_barycenter = np.mean(points_src, axis=0)
# apply transformation to moving points (src)
points_src_reg = ((rotsc_matrix * (np.matrix(points_src) - points_src_barycenter).T).T + points_src_barycenter) + translation_array
logger.info(f"Matrix:\n {rotation_matrix}")
logger.info(f"Center:\n {points_src_barycenter}")
logger.info(f"Translation:\n {translation_array}")
if verbose == 2:
import matplotlib
# use Agg to prevent display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
points_src_matrix = np.matrix(points_src)
points_dest_matrix = np.matrix(points_dest)
number_points = len(points_dest)
ax.scatter([points_dest_matrix[i, 0] for i in range(0, number_points)],
[points_dest_matrix[i, 1] for i in range(0, number_points)],
[points_dest_matrix[i, 2] for i in range(0, number_points)], c='g', marker='+', s=500, label='dest')
ax.scatter([points_src_matrix[i, 0] for i in range(0, number_points)],
[points_src_matrix[i, 1] for i in range(0, number_points)],
[points_src_matrix[i, 2] for i in range(0, number_points)], c='r', label='src')
ax.scatter([points_src_reg[i, 0] for i in range(0, number_points)],
[points_src_reg[i, 1] for i in range(0, number_points)],
[points_src_reg[i, 2] for i in range(0, number_points)], c='b', label='src_reg')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_aspect('auto')
plt.legend()
# plt.show()
plt.savefig('getRigidTransformFromLandmarks_plot.png')
fig2 = plt.figure()
plt.plot(sse_results)
plt.grid()
plt.title('#Iterations: ' + str(res.nit) + ', #FuncEval: ' + str(res.nfev) + ', Error: ' + str(res.fun))
plt.show()
plt.savefig(os.path.join('getRigidTransformFromLandmarks_iterations.png'))
# transform numpy matrix to list structure because it is easier to handle
points_src_reg = points_src_reg.tolist()
return rotsc_matrix, translation_array, points_src_reg, points_src_barycenter
| mit |
nigroup/pypet | pypet/environment.py | 1 | 148936 | """ Module containing the environment to run experiments.
An :class:`~pypet.environment.Environment` provides an interface to run experiments based on
parameter exploration.
The environment contains and might even create a :class:`~pypet.trajectory.Trajectory`
container which can be filled with parameters and results (see :mod:`pypet.parameter`).
Instance of this trajectory are distributed to the user's job function to perform a single run
of an experiment.
An `Environment` is the handyman for scheduling, it can be used for multiprocessing and takes
care of organizational issues like logging.
"""
__author__ = 'Robert Meyer'
try:
import __main__ as main
except ImportError as exc:
main = None # We can end up here in an interactive IPython console
import os
import sys
import logging
import shutil
import multiprocessing as multip
import traceback
import hashlib
import time
import datetime
import inspect
try:
from sumatra.projects import load_project
from sumatra.programs import PythonExecutable
except ImportError:
load_project = None
PythonExecutable = None
try:
import dill
# If you do not set this log-level dill will flood any log file :-(
logging.getLogger(dill.__name__).setLevel(logging.WARNING)
except ImportError:
dill = None
try:
import psutil
except ImportError:
psutil = None
try:
import scoop
from scoop import futures, shared
except ImportError:
scoop = None
try:
import git
except ImportError:
git = None
try:
import zmq
except ImportError:
zmq = None
import pypet.compat as compat
import pypet.pypetconstants as pypetconstants
from pypet.pypetlogging import LoggingManager, HasLogger, simple_logging_config
from pypet.trajectory import Trajectory
from pypet.storageservice import HDF5StorageService, LazyStorageService
from pypet.utils.mpwrappers import QueueStorageServiceWriter, LockWrapper, \
PipeStorageServiceSender, PipeStorageServiceWriter, ReferenceWrapper, \
ReferenceStore, QueueStorageServiceSender, LockerServer, LockerClient, \
ForkAwareLockerClient, TimeOutLockerServer, QueuingClient, QueuingServer, \
ForkAwareQueuingClient
from pypet.utils.siginthandling import sigint_handling
from pypet.utils.gitintegration import make_git_commit
from pypet._version import __version__ as VERSION
from pypet.utils.decorators import deprecated, kwargs_api_change, prefix_naming
from pypet.utils.helpful_functions import is_debug, result_sort, format_time, port_to_tcp, \
racedirs
from pypet.utils.storagefactory import storage_factory
from pypet.utils.configparsing import parse_config
from pypet.parameter import Parameter
def _pool_single_run(kwargs):
"""Starts a pool single run and passes the storage service"""
wrap_mode = kwargs['wrap_mode']
traj = kwargs['traj']
traj.v_storage_service = _pool_single_run.storage_service
if wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
# Free references from previous runs
traj.v_storage_service.free_references()
return _sigint_handling_single_run(kwargs)
def _frozen_pool_single_run(kwargs):
"""Single run wrapper for the frozen pool, makes a single run and passes kwargs"""
idx = kwargs.pop('idx')
frozen_kwargs = _frozen_pool_single_run.kwargs
frozen_kwargs.update(kwargs) # in case of `run_map`
# we need to update job's args and kwargs
traj = frozen_kwargs['traj']
traj.f_set_crun(idx)
return _sigint_handling_single_run(frozen_kwargs)
def _configure_pool(kwargs):
"""Configures the pool and keeps the storage service"""
_pool_single_run.storage_service = kwargs['storage_service']
_configure_niceness(kwargs)
_configure_logging(kwargs, extract=False)
def _configure_frozen_pool(kwargs):
"""Configures the frozen pool and keeps all kwargs"""
_frozen_pool_single_run.kwargs = kwargs
_configure_niceness(kwargs)
_configure_logging(kwargs, extract=False)
# Reset full copy to it's old value
traj = kwargs['traj']
traj.v_full_copy = kwargs['full_copy']
def _process_single_run(kwargs):
"""Wrapper function that first configures logging and starts a single run afterwards."""
_configure_niceness(kwargs)
_configure_logging(kwargs)
result_queue = kwargs['result_queue']
result = _sigint_handling_single_run(kwargs)
result_queue.put(result)
result_queue.close()
def _configure_frozen_scoop(kwargs):
"""Wrapper function that configures a frozen SCOOP set up.
Deletes of data if necessary.
"""
def _delete_old_scoop_rev_data(old_scoop_rev):
if old_scoop_rev is not None:
try:
elements = shared.elements
for key in elements:
var_dict = elements[key]
if old_scoop_rev in var_dict:
del var_dict[old_scoop_rev]
logging.getLogger('pypet.scoop').debug('Deleted old SCOOP data from '
'revolution `%s`.' % old_scoop_rev)
except AttributeError:
logging.getLogger('pypet.scoop').error('Could not delete old SCOOP data from '
'revolution `%s`.' % old_scoop_rev)
scoop_rev = kwargs.pop('scoop_rev')
# Check if we need to reconfigure SCOOP
try:
old_scoop_rev = _frozen_scoop_single_run.kwargs['scoop_rev']
configured = old_scoop_rev == scoop_rev
except (AttributeError, KeyError):
old_scoop_rev = None
configured = False
if not configured:
_frozen_scoop_single_run.kwargs = shared.getConst(scoop_rev, timeout=424.2)
frozen_kwargs = _frozen_scoop_single_run.kwargs
frozen_kwargs['scoop_rev'] = scoop_rev
frozen_kwargs['traj'].v_full_copy = frozen_kwargs['full_copy']
if not scoop.IS_ORIGIN:
_configure_niceness(frozen_kwargs)
_configure_logging(frozen_kwargs, extract=False)
_delete_old_scoop_rev_data(old_scoop_rev)
logging.getLogger('pypet.scoop').info('Configured Worker %s' % str(scoop.worker))
def _frozen_scoop_single_run(kwargs):
try:
_configure_frozen_scoop(kwargs)
idx = kwargs.pop('idx')
frozen_kwargs = _frozen_scoop_single_run.kwargs
frozen_kwargs.update(kwargs)
traj = frozen_kwargs['traj']
traj.f_set_crun(idx)
return _single_run(frozen_kwargs)
except Exception:
scoop.logger.exception('ERROR occurred during a single run!')
raise
def _scoop_single_run(kwargs):
"""Wrapper function for scoop, that does not configure logging"""
try:
try:
is_origin = scoop.IS_ORIGIN
except AttributeError:
# scoop is not properly started, i.e. with `python -m scoop...`
# in this case scoop uses default `map` function, i.e.
# the main process
is_origin = True
if not is_origin:
# configure logging and niceness if not the main process:
_configure_niceness(kwargs)
_configure_logging(kwargs)
return _single_run(kwargs)
except Exception:
scoop.logger.exception('ERROR occurred during a single run!')
raise
def _configure_logging(kwargs, extract=True):
"""Requests the logging manager to configure logging.
:param extract:
If naming data should be extracted from the trajectory
"""
try:
logging_manager = kwargs['logging_manager']
if extract:
logging_manager.extract_replacements(kwargs['traj'])
logging_manager.make_logging_handlers_and_tools(multiproc=True)
except Exception as exc:
sys.stderr.write('Could not configure logging system because of: %s' % repr(exc))
traceback.print_exc()
def _configure_niceness(kwargs):
"""Sets niceness of a process"""
niceness = kwargs['niceness']
if niceness is not None:
try:
try:
current = os.nice(0)
if niceness - current > 0:
# Under Linux you cannot decrement niceness if set elsewhere
os.nice(niceness-current)
except AttributeError:
# Fall back on psutil under Windows
psutil.Process().nice(niceness)
except Exception as exc:
sys.stderr.write('Could not configure niceness because of: %s' % repr(exc))
traceback.print_exc()
def _sigint_handling_single_run(kwargs):
"""Wrapper that allow graceful exits of single runs"""
try:
graceful_exit = kwargs['graceful_exit']
if graceful_exit:
sigint_handling.start()
if sigint_handling.hit:
result = (sigint_handling.SIGINT, None)
else:
result = _single_run(kwargs)
if sigint_handling.hit:
result = (sigint_handling.SIGINT, result)
return result
return _single_run(kwargs)
except:
# Log traceback of exception
pypet_root_logger = logging.getLogger('pypet')
pypet_root_logger.exception('ERROR occurred during a single run! ')
raise
def _single_run(kwargs):
""" Performs a single run of the experiment.
:param kwargs: Dict of arguments
traj: The trajectory containing all parameters set to the corresponding run index.
runfunc: The user's job function
runargs: The arguments handed to the user's job function (as *args)
runkwargs: The keyword arguments handed to the user's job function (as **kwargs)
clean_up_after_run: Whether to clean up after the run
automatic_storing: Whether or not the data should be automatically stored
result_queue: A queue object to store results into in case a pool is used, otherwise None
:return:
Results computed by the user's job function which are not stored into the trajectory.
Returns a nested tuple of run index and result and run information:
``((traj.v_idx, result), run_information_dict)``
"""
pypet_root_logger = logging.getLogger('pypet')
traj = kwargs['traj']
runfunc = kwargs['runfunc']
runargs = kwargs['runargs']
kwrunparams = kwargs['runkwargs']
clean_up_after_run = kwargs['clean_up_runs']
automatic_storing = kwargs['automatic_storing']
wrap_mode = kwargs['wrap_mode']
idx = traj.v_idx
total_runs = len(traj)
pypet_root_logger.info('\n=========================================\n '
'Starting single run #%d of %d '
'\n=========================================\n' % (idx, total_runs))
# Measure start time
traj.f_start_run(turn_into_run=True)
# Run the job function of the user
result = runfunc(traj, *runargs, **kwrunparams)
# Store data if desired
if automatic_storing:
traj.f_store()
# Add the index to the result and the run information
if wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
result = ((traj.v_idx, result),
traj.f_get_run_information(traj.v_idx, copy=False),
traj.v_storage_service.references)
traj.v_storage_service.free_references()
else:
result = ((traj.v_idx, result),
traj.f_get_run_information(traj.v_idx, copy=False))
# Measure time of finishing
traj.f_finalize_run(store_meta_data=False,
clean_up=clean_up_after_run)
pypet_root_logger.info('\n=========================================\n '
'Finished single run #%d of %d '
'\n=========================================\n' % (idx, total_runs))
return result
def _wrap_handling(kwargs):
""" Starts running a queue handler and creates a log file for the queue."""
_configure_logging(kwargs, extract=False)
# Main job, make the listener to the queue start receiving message for writing to disk.
    handler = kwargs['handler']
graceful_exit = kwargs['graceful_exit']
# import cProfile as profile
# profiler = profile.Profile()
# profiler.enable()
if graceful_exit:
sigint_handling.start()
handler.run()
# profiler.disable()
# profiler.dump_stats('./queue.profile2')
@prefix_naming
class Environment(HasLogger):
""" The environment to run a parameter exploration.
    The first thing you usually do is to create an environment object that takes care of
    running the experiment. You can provide the following arguments:
:param trajectory:
String or trajectory instance. If a string is supplied, a novel
trajectory is created with that name.
Note that the comment and the dynamically imported classes (see below) are only considered
if a novel trajectory is created. If you supply a trajectory instance, these fields
can be ignored.
:param add_time: If True the current time is added to the trajectory name if created new.
:param comment: Comment added to the trajectory if a novel trajectory is created.
:param dynamic_imports:
Only considered if a new trajectory is created.
If you've written custom parameters or results that need to be loaded
dynamically during runtime, the module containing the class
needs to be specified here as a list of classes or strings
naming classes and there module paths.
For example:
`dynamic_imports =
['pypet.parameter.PickleParameter', MyCustomParameter]`
If you only have a single class to import, you do not need
the list brackets:
`dynamic_imports = 'pypet.parameter.PickleParameter'`
:param wildcard_functions:
Dictionary of wildcards like `$` and corresponding functions that are called upon
finding such a wildcard. For example, to replace the `$` aka `crun` wildcard,
you can pass the following: ``wildcard_functions = {('$', 'crun'): myfunc}``.
Your wildcard function `myfunc` must return a unique run name as a function of
a given integer run index. Moreover, your function must also return a unique
*dummy* name for the run index being `-1`.
Of course, you can define your
        own wildcards like ``wildcard_functions = {('$mycard', 'mycard'): myfunc}``.
These are not required to return a unique name for each run index, but can be used
to group runs into buckets by returning the same name for several run indices.
Yet, all wildcard functions need to return a dummy name for the index `-1`.
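        A minimal sketch of such a wildcard function (purely illustrative):
        ``def myfunc(idx): return 'run_ALL' if idx == -1 else 'run_%08d' % idx``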
:param automatic_storing:
If `True` the trajectory will be stored at the end of the simulation and
single runs will be stored after their completion.
Be aware of data loss if you set this to `False` and not
manually store everything.
:param log_config:
Can be path to a logging `.ini` file specifying the logging configuration.
For an example of such a file see :ref:`more-on-logging`.
Can also be a dictionary that is accepted by the built-in logging module.
Set to `None` if you don't want *pypet* to configure logging.
If not specified, the default settings are used. Moreover, you can manually tweak the
default settings without creating a new `ini` file.
Instead of the `log_config` parameter, pass a ``log_folder``,
a list of `logger_names` and corresponding `log_levels` to fine grain
the loggers to which the default settings apply.
For example:
``log_folder='logs', logger_names='('pypet', 'MyCustomLogger'), log_levels=(logging.ERROR, logging.INFO)``
You can further disable multiprocess logging via setting ``log_multiproc=False``.
:param log_stdout:
Whether the output of ``stdout`` should be recorded into the log files.
Disable if only logging statement should be recorded. Note if you work with an
interactive console like *IPython*, it is a good idea to set ``log_stdout=False``
to avoid messing up the console output.
Can also be a tuple: ('mylogger', 10), specifying a logger name as well as a log-level.
The log-level defines with what level `stdout` is logged, it is *not* a filter.
:param report_progress:
If progress of runs and an estimate of the remaining time should be shown.
Can be `True` or `False` or a triple ``(10, 'pypet', logging.Info)`` where the first number
is the percentage and update step of the resulting progressbar and
the second one is a corresponding logger name with which the progress should be logged.
If you use `'print'`, the `print` statement is used instead. The third value
specifies the logging level (level of logging statement *not* a filter)
with which the progress should be logged.
Note that the progress is based on finished runs. If you use the `QUEUE` wrapping
in case of multiprocessing and if storing takes long, the estimate of the remaining
time might not be very accurate.
:param multiproc:
Whether or not to use multiprocessing. Default is ``False``.
Besides the wrap_mode (see below) that deals with how
storage to disk is carried out in case of multiprocessing, there
are two ways to do multiprocessing. By using a fixed pool of
processes (choose `use_pool=True`, default option) or by spawning an
individual process for every run and parameter combination (`use_pool=False`).
        The former will spawn no more than *ncores* processes and all simulation runs are
        sent over to the pool one after the other.
This requires all your data to be pickled.
If your data cannot be pickled (which could be the case for some
BRIAN networks, for instance) choose `use_pool=False` (also make sure to set
`continuable=False`). This will also spawn
at most *ncores* processes at a time, but as soon as a process terminates
a new one is spawned with the next parameter combination. Be aware that you will
have as many logfiles in your logfolder as processes were spawned.
If your simulation returns results besides storing results directly into the trajectory,
these returned results still need to be pickled.
:param ncores:
If multiproc is ``True``, this specifies the number of processes that will be spawned
to run your experiment. Note if you use QUEUE mode (see below) the queue process
is not included in this number and will add another extra process for storing.
If you have *psutil* installed, you can set `ncores=0` to let *psutil* determine
the number of CPUs available.
:param use_scoop:
        Whether Python should be used in a SCOOP_ framework to distribute runs among a cluster
or multiple servers. If so you need to start your script via
``python -m scoop my_script.py``. Currently, SCOOP_ only works with
``'LOCAL'`` ``wrap_mode`` (see below).
.. _SCOOP: http://scoop.readthedocs.org/
:param use_pool:
Whether to use a fixed pool of processes or whether to spawn a new process
for every run. Use the former if you perform many runs (50k and more)
which are in terms of memory and runtime inexpensive.
Be aware that everything you use must be picklable.
Use the latter for fewer runs (50k and less) and which are longer lasting
and more expensive runs (in terms of memory consumption).
In case your operating system allows forking, your data does not need to be
picklable.
If you choose ``use_pool=False`` you can also make use of the `cap` values,
see below.
:param freeze_input:
Can be set to ``True`` if the run function as well as all additional arguments
are immutable. This will prevent the trajectory from getting pickled again and again.
Thus, the run function, the trajectory, as well as all arguments are passed to the pool
or SCOOP workers at initialisation. Works also under `run_map`.
In this case the iterable arguments are, of course, not frozen but passed for every run.
:param timeout:
Timeout parameter in seconds passed on to SCOOP_ and ``'NETLOCK'`` wrapping.
Leave `None` for no timeout. After `timeout` seconds SCOOP_ will assume
that a single run failed and skip waiting for it.
Moreover, if using ``'NETLOCK'`` wrapping, after `timeout` seconds
a lock is automatically released and again
available for other waiting processes.
:param cpu_cap:
If `multiproc=True` and `use_pool=False` you can specify a maximum cpu utilization between
0.0 (excluded) and 100.0 (included) as fraction of maximum capacity. If the current cpu
usage is above the specified level (averaged across all cores),
*pypet* will not spawn a new process and wait until
activity falls below the threshold again. Note that in order to avoid dead-lock at least
one process will always be running regardless of the current utilization.
If the threshold is crossed a warning will be issued. The warning won't be repeated as
long as the threshold remains crossed.
For example `cpu_cap=70.0`, `ncores=3`, and currently on average 80 percent of your cpu are
used. Moreover, let's assume that at the moment only 2 processes are
computing single runs simultaneously. Due to the usage of 80 percent of your cpu,
*pypet* will wait until cpu usage drops below (or equal to) 70 percent again
until it starts a third process to carry out another single run.
The parameters `memory_cap` and `swap_cap` are analogous. These three thresholds are
combined to determine whether a new process can be spawned. Accordingly, if only one
of these thresholds is crossed, no new processes will be spawned.
To disable the cap limits simply set all three values to 100.0.
You need the psutil_ package to use this cap feature. If not installed and you
choose cap values different from 100.0 a ValueError is thrown.
:param memory_cap:
Cap value of RAM usage. If more RAM than the threshold is currently in use, no new
processes are spawned. Can also be a tuple ``(limit, memory_per_process)``,
first value is the cap value (between 0.0 and 100.0),
second one is the estimated memory per process in mega bytes (MB).
If an estimate is given a new process is not started if
the threshold would be crossed including the estimate.
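        For example, ``memory_cap=(95.0, 200.0)`` blocks new processes whenever the current RAM
        usage plus an estimated 200 MB per process would exceed 95 percent.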
:param swap_cap:
Analogous to `cpu_cap` but the swap memory is considered.
:param niceness:
If you are running on a UNIX based system or you have psutil_ (under Windows) installed,
you can choose a niceness value to prioritize the child processes executing the
single runs in case you use multiprocessing.
Under Linux these usually range from 0 (highest priority)
to 19 (lowest priority). For Windows values check the psutil_ homepage.
Leave ``None`` if you don't care about niceness.
        Under Linux the ``niceness`` value is a minimum value; if the OS decides to
nice your program (maybe you are running on a server) *pypet* does not try to
decrease the `niceness` again.
:param wrap_mode:
If multiproc is ``True``, specifies how storage to disk is handled via
the storage service.
There are a few options:
:const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`: ('QUEUE')
Another process for storing the trajectory is spawned. The sub processes
running the individual single runs will add their results to a
multiprocessing queue that is handled by an additional process.
Note that this requires additional memory since the trajectory
will be pickled and send over the queue for storage!
:const:`~pypet.pypetconstants.WRAP_MODE_LOCK`: ('LOCK')
Each individual process takes care about storage by itself. Before
carrying out the storage, a lock is placed to prevent the other processes
to store data. Accordingly, sometimes this leads to a lot of processes
waiting until the lock is released.
Allows loading of data during runs.
        :const:`~pypet.pypetconstants.WRAP_MODE_PIPE`: ('PIPE')
Experimental mode based on a single pipe. Is faster than ``'QUEUE'`` wrapping
but data corruption may occur, does not work under Windows
(since it relies on forking).
        :const:`~pypet.pypetconstants.WRAP_MODE_LOCAL` ('LOCAL')
Data is not stored during the single runs but after they completed.
Storing is only performed in the main process.
Note that removing data during a single run has no longer an effect on memory
whatsoever, because there are references kept for all data
that is supposed to be stored.
        :const:`~pypet.pypetconstants.WRAP_MODE_NETLOCK` ('NETLOCK')
Similar to 'LOCK' but locks can be shared across a network.
Sharing is established by running a lock server that
distributes locks to the individual processes.
Can be used with SCOOP_ if all hosts have access to
a shared home directory.
Allows loading of data during runs.
        :const:`~pypet.pypetconstants.WRAP_MODE_NETQUEUE` ('NETQUEUE')
Similar to 'QUEUE' but data can be shared across a network.
Sharing is established by running a queue server that
distributes locks to the individual processes.
If you don't want wrapping at all use
:const:`~pypet.pypetconstants.WRAP_MODE_NONE` ('NONE')
:param queue_maxsize:
Maximum size of the Storage Queue, in case of ``'QUEUE'`` wrapping.
``0`` means infinite, ``-1`` (default) means the educated guess of ``2 * ncores``.
:param port:
Port to be used by lock server in case of ``'NETLOCK'`` wrapping.
Can be a single integer as well as a tuple ``(7777, 9999)`` to specify
a range of ports from which to pick a random one.
Leave `None` for using pyzmq's default range.
In case automatic determining of the host's IP address fails,
you can also pass the full address (including the protocol and
the port) of the host in the network like ``'tcp://127.0.0.1:7777'``.
:param gc_interval:
Interval (in runs or storage operations) with which ``gc.collect()``
should be called in case of the ``'LOCAL'``, ``'QUEUE'``, or ``'PIPE'`` wrapping.
Leave ``None`` for never.
In case of ``'LOCAL'`` wrapping ``1`` means after every run ``2``
after every second run, and so on. In case of ``'QUEUE'`` or ``'PIPE''`` wrapping
``1`` means after every store operation,
``2`` after every second store operation, and so on.
Only calls ``gc.collect()`` in the main (if ``'LOCAL'`` wrapping)
or the queue/pipe process. If you need to garbage collect data within your single runs,
you need to manually call ``gc.collect()``.
Usually, there is no need to set this parameter since the Python garbage collection
works quite nicely and schedules collection automatically.
:param clean_up_runs:
In case of single core processing, whether all results under groups named `run_XXXXXXXX`
should be removed after the completion of
the run. Note in case of multiprocessing this happens anyway since the single run
container will be destroyed after finishing of the process.
Moreover, if set to ``True`` after post-processing it is checked if there is still data
under `run_XXXXXXXX` and this data is removed if the trajectory is expanded.
:param immediate_postproc:
If you use post- and multiprocessing, you can immediately start analysing the data
as soon as the trajectory runs out of tasks, i.e. is fully explored but the final runs
are not completed. Thus, while executing the last batch of parameter space points,
you can already analyse the finished runs. This is especially helpful if you perform some
sort of adaptive search within the parameter space.
The difference to normal post-processing is that you do not have to wait until all
single runs are finished, but your analysis already starts while there are still
runs being executed. This can be a huge time saver especially if your simulation time
differs a lot between individual runs. Accordingly, you don't have to wait for a very
long run to finish to start post-processing.
In case you use immediate postprocessing, the storage service of your trajectory is still
multiprocessing safe (except when using the wrap_mode ``'LOCAL'``).
Accordingly, you could even use multiprocessing in your immediate post-processing phase
if you dare, like use a multiprocessing pool_, for instance.
Note that after the execution of the final run, your post-processing routine will
be called again as usual.
**IMPORTANT**: If you use immediate post-processing, the results that are passed to
your post-processing function are not sorted by their run indices but by finishing time!
.. _pool: https://docs.python.org/2/library/multiprocessing.html
:param resumable:
Whether the environment should take special care to allow to resume or continue
crashed trajectories. Default is ``False``.
You need to install dill_ to use this feature. *dill* will make snapshots
of your simulation function as well as the passed arguments.
BE AWARE that dill is still rather experimental!
Assume you run experiments that take a lot of time.
If during your experiments there is a power failure,
you can resume your trajectory after the last single run that was still
successfully stored via your storage service.
The environment will create several `.ecnt` and `.rcnt` files in a folder that you specify
(see below).
Using this data you can resume crashed trajectories.
In order to resume trajectories use :func:`~pypet.environment.Environment.resume`.
Be aware that your individual single runs must be completely independent of one
another to allow continuing to work. Thus, they should **NOT** be based on shared data
that is manipulated during runtime (like a multiprocessing manager list)
in the positional and keyword arguments passed to the run function.
If you use post-processing, the expansion of trajectories and continuing of trajectories
is NOT supported properly. There is no guarantee that both work together.
.. _dill: https://pypi.python.org/pypi/dill
:param resume_folder:
The folder where the resume files will be placed. Note that *pypet* will create
a sub-folder with the name of the environment.
:param delete_resume:
If true, *pypet* will delete the resume files after a successful simulation.
:param storage_service:
Pass a given storage service or a class constructor (default ``HDF5StorageService``)
if you want the environment to create
the service for you. The environment will pass the
additional keyword arguments you pass directly to the constructor.
If the trajectory already has a service attached,
the one from the trajectory will be used.
:param git_repository:
If your code base is under git version control you can specify here the path
(relative or absolute) to the folder containing the `.git` directory as a string.
Note in order to use this tool you need GitPython_.
If you set this path the environment will trigger a commit of your code base
adding all files that are currently under version control.
Similar to calling `git add -u` and `git commit -m 'My Message'` on the command line.
The user can specify the commit message, see below. Note that the message
will be augmented by the name and the comment of the trajectory.
A commit will only be triggered if there are changes detected within your
working copy.
This will also add information about the revision to the trajectory, see below.
.. _GitPython: http://pythonhosted.org/GitPython/0.3.1/index.html
:param git_message:
Message passed onto git command. Only relevant if a new commit is triggered.
If no changes are detected, the information about the previous commit and the previous
commit message are added to the trajectory and this user passed message is discarded.
:param git_fail:
If `True` the program fails instead of triggering a commit if there are not committed
changes found in the code base. In such a case a `GitDiffError` is raised.
:param sumatra_project:
If your simulation is managed by sumatra_, you can specify here the path to the
*sumatra* root folder. Note that you have to initialise the *sumatra* project at least
once before via ``smt init MyFancyProjectName``.
        *pypet* will automatically add ALL parameters to the *sumatra* record.
If a parameter is explored, the WHOLE range is added instead of the default value.
*pypet* will add the label and reason (only if provided, see below)
to your trajectory as config parameters.
.. _sumatra : http://neuralensemble.org/sumatra/
:param sumatra_reason:
You can add an additional reason string that is added to the *sumatra* record.
Regardless if `sumatra_reason` is empty, the name of the trajectory, the comment
as well as a list of all explored parameters is added to the *sumatra* record.
Note that the augmented label is not stored into the trajectory as config
parameter, but the original one (without the name of the trajectory, the comment,
and the list of explored parameters) in case it is not the empty string.
:param sumatra_label:
The label or name of your sumatra record. Set to `None` if you want sumatra
to choose a label in form of a timestamp for you.
:param do_single_runs:
        Whether you actually intend to compute single runs with the trajectory.
        If you do not intend to do single runs, then set to ``False`` and the
environment won't add config information like number of processors to the
trajectory.
:param graceful_exit:
If ``True`` hitting CTRL+C (i.e.sending SIGINT) will not terminate the program
immediately. Instead, active single runs will be finished and stored before
shutdown. Hitting CTRL+C twice will raise a KeyboardInterrupt as usual.
:param lazy_debug:
If ``lazy_debug=True`` and in case you debug your code (aka you use pydevd and
the expression ``'pydevd' in sys.modules`` is ``True``), the environment will use the
:class:`~pypet.storageservice.LazyStorageService` instead of the HDF5 one.
Accordingly, no files are created and your trajectory and results are not saved.
This allows faster debugging and prevents *pypet* from blowing up your hard drive with
trajectories that you probably not want to use anyway since you just debug your code.
The Environment will automatically add some config settings to your trajectory.
Thus, you can always look up how your trajectory was run. This encompasses most of the above
named parameters as well as some information about the environment.
This additional information includes
a timestamp as well as a SHA-1 hash code that uniquely identifies your environment.
If you use git integration, the SHA-1 hash code will be the one from your git commit.
Otherwise the code will be calculated from the trajectory name, the current time, and your
current *pypet* version.
The environment will be named `environment_XXXXXXX_XXXX_XX_XX_XXhXXmXXs`. The first seven
`X` are the first seven characters of the SHA-1 hash code followed by a human readable
timestamp.
All information about the environment can be found in your trajectory under
`config.environment.environment_XXXXXXX_XXXX_XX_XX_XXhXXmXXs`. Your trajectory could
potentially be run by several environments due to merging or extending an existing trajectory.
Thus, you will be able to track how your trajectory was built over time.
Git information is added to your trajectory as follows:
* git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.hexsha
The SHA-1 hash of the commit.
`commit_XXXXXXX_XXXX_XX_XX_XXhXXmXXs` is mapped to the first seven items of the SHA-1 hash
and the formatted data of the commit, e.g. `commit_7ef7hd4_2015_10_21_16h29m00s`.
* git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.name_rev
String describing the commits hexsha based on the closest reference
* git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.committed_date
Commit date as Unix Epoch data
* git.commit_XXXXXXX_XXXX_XX_XX_XXh_XXm_XXs.message
The commit message
Moreover, if you use the standard ``HDF5StorageService`` you can pass the following keyword
arguments in ``**kwargs``:
:param filename:
The name of the hdf5 file. If none is specified the default
`./hdf5/the_name_of_your_trajectory.hdf5` is chosen. If `filename` contains only a path
like `filename='./myfolder/', it is changed to
`filename='./myfolder/the_name_of_your_trajectory.hdf5'`.
:param file_title: Title of the hdf5 file (only important if file is created new)
:param overwrite_file:
If the file already exists it will be overwritten. Otherwise,
the trajectory will simply be added to the file and already
existing trajectories are **not** deleted.
:param encoding:
Format to encode and decode unicode strings stored to disk.
The default ``'utf8'`` is highly recommended.
:param complevel:
You can specify your compression level. 0 means no compression
and 9 is the highest compression level. See `PyTables Compression`_ for a detailed
description.
.. _`PyTables Compression`: http://pytables.github.io/usersguide/optimization.html#compression-issues
:param complib:
The library used for compression. Choose between *zlib*, *blosc*, and *lzo*.
Note that 'blosc' and 'lzo' are usually faster than 'zlib' but it may be the case that
you can no longer open your hdf5 files with third-party applications that do not rely
on PyTables.
:param shuffle:
Whether or not to use the shuffle filters in the HDF5 library.
This normally improves the compression ratio.
:param fletcher32:
Whether or not to use the *Fletcher32* filter in the HDF5 library.
This is used to add a checksum on hdf5 data.
:param pandas_format:
How to store pandas data frames. Either in 'fixed' ('f') or 'table' ('t') format.
Fixed format allows fast reading and writing but disables querying the hdf5 data and
appending to the store (with other 3rd party software other than *pypet*).
:param purge_duplicate_comments:
If you add a result via :func:`~pypet.naturalnaming.ResultGroup.f_add_result` or a derived
parameter :func:`~pypet.naturalnaming.DerivedParameterGroup.f_add_derived_parameter` and
you set a comment, normally that comment would be attached to each and every instance.
This can produce a lot of unnecessary overhead if the comment is the same for every
instance over all runs. If `purge_duplicate_comments=1` than only the comment of the
first result or derived parameter instance created in a run is stored or comments
that differ from this first comment.
For instance, during a single run you call
`traj.f_add_result('my_result`,42, comment='Mostly harmless!')`
and the result will be renamed to `results.run_00000000.my_result`. After storage
in the node associated with this result in your hdf5 file, you will find the comment
`'Mostly harmless!'` there. If you call
`traj.f_add_result('my_result',-43, comment='Mostly harmless!')`
in another run again, let's say run 00000001, the name will be mapped to
`results.run_00000001.my_result`. But this time the comment will not be saved to disk
since `'Mostly harmless!'` is already part of the very first result with the name
'results.run_00000000.my_result'.
Note that the comments will be compared and storage will only be discarded if the strings
are exactly the same.
If you use multiprocessing, the storage service will take care that the comment for
the result or derived parameter with the lowest run index will be considered regardless
of the order of the finishing of your runs. Note that this only works properly if all
comments are the same. Otherwise the comment in the overview table might not be the one
with the lowest run index.
You need summary tables (see below) to be able to purge duplicate comments.
This feature only works for comments in *leaf* nodes (aka Results and Parameters).
So try to avoid adding comments to *group* nodes within single runs.
:param summary_tables:
Whether the summary tables should be created, i.e. the 'derived_parameters_runs_summary',
and the `results_runs_summary`.
The 'XXXXXX_summary' tables give a summary about all results or derived parameters.
It is assumed that results and derived parameters with equal names in individual runs
are similar and only the first result or derived parameter that was created
is shown as an example.
The summary table can be used in combination with `purge_duplicate_comments` to only store
a single comment for every result with the same name in each run, see above.
:param small_overview_tables:
Whether the small overview tables should be created.
Small tables give an overview of 'config', 'parameters',
'derived_parameters_trajectory', 'results_trajectory', and 'results_runs_summary'.
Note that these tables create some overhead. If you want very small hdf5 files set
`small_overview_tables` to False.
:param large_overview_tables:
Whether to add large overview tables. This encompasses information about every derived
parameter, result, and the explored parameter in every single run.
If you want small hdf5 files set to ``False`` (default).
:param results_per_run:
Expected number of results you store per run. If you give a good/correct estimate,
storage to the hdf5 file is much faster in case you store LARGE overview tables.
Default is 0, i.e. the number of results is not estimated!
:param derived_parameters_per_run:
Analogous to the above.
Finally, you can also pass properties of the trajectory, like ``v_with_links=True``
(you can omit the prefix ``v_``, i.e. ``with_links`` works, too).
Thus, you can change the settings of the trajectory immediately.
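For illustration, a minimal sketch of creating an environment that stores to a
compressed HDF5 file; the trajectory name, file name, and values below are
arbitrary placeholders::

    env = Environment(trajectory='my_traj',
                      comment='An example experiment',
                      filename='./hdf5/my_traj.hdf5',
                      complevel=9,
                      complib='blosc',
                      large_overview_tables=False)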
.. _psutil: http://psutil.readthedocs.org/
"""
@parse_config
@kwargs_api_change('delete_continue', 'delete_resume')
@kwargs_api_change('continue_folder', 'resume_folder')
@kwargs_api_change('continuable', 'resumable')
@kwargs_api_change('freeze_pool_input', 'freeze_input')
@kwargs_api_change('use_hdf5', 'storage_service')
@kwargs_api_change('dynamically_imported_classes', 'dynamic_imports')
@kwargs_api_change('pandas_append')
@simple_logging_config
def __init__(self, trajectory='trajectory',
add_time=False,
comment='',
dynamic_imports=None,
wildcard_functions=None,
automatic_storing=True,
log_config=pypetconstants.DEFAULT_LOGGING,
log_stdout=False,
report_progress = (5, 'pypet', logging.INFO),
multiproc=False,
ncores=1,
use_scoop=False,
use_pool=False,
freeze_input=False,
timeout=None,
cpu_cap=100.0,
memory_cap=100.0,
swap_cap=100.0,
niceness=None,
wrap_mode=pypetconstants.WRAP_MODE_LOCK,
queue_maxsize=-1,
port=None,
gc_interval=None,
clean_up_runs=True,
immediate_postproc=False,
resumable=False,
resume_folder=None,
delete_resume=True,
storage_service=HDF5StorageService,
git_repository=None,
git_message='',
git_fail=False,
sumatra_project=None,
sumatra_reason='',
sumatra_label=None,
do_single_runs=True,
graceful_exit=False,
lazy_debug=False,
**kwargs):
if git_repository is not None and git is None:
raise ValueError('You cannot specify a git repository without having '
'GitPython. Please install the GitPython package to use '
'pypet`s git integration.')
if resumable and dill is None:
raise ValueError('Please install `dill` if you want to use the feature to '
'resume halted trajectories')
if load_project is None and sumatra_project is not None:
raise ValueError('`sumatra` package has not been found, either install '
'`sumatra` or set `sumatra_project=None`.')
if sumatra_label is not None and '.' in sumatra_label:
raise ValueError('Your sumatra label is not allowed to contain dots.')
if wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and zmq is None:
raise ValueError('You need to install `zmq` for `NETLOCK` wrapping.')
if (use_pool or use_scoop) and immediate_postproc:
raise ValueError('You CANNOT perform immediate post-processing if you DO '
'use a pool or scoop.')
if use_pool and use_scoop:
raise ValueError('You can either `use_pool` or `use_scoop` or neither, '
'but not both together.')
if use_scoop and scoop is None:
raise ValueError('Cannot use `scoop` because it is not installed.')
if (wrap_mode not in (pypetconstants.WRAP_MODE_NONE,
pypetconstants.WRAP_MODE_LOCAL,
pypetconstants.WRAP_MODE_LOCK,
pypetconstants.WRAP_MODE_NETLOCK) and
resumable):
raise ValueError('Continuing trajectories only works with '
'`LOCK`, `NETLOCK` or `LOCAL` wrap mode.')
if resumable and not automatic_storing:
raise ValueError('Continuing only works with `automatic_storing=True`')
if use_scoop and wrap_mode not in (pypetconstants.WRAP_MODE_LOCAL,
pypetconstants.WRAP_MODE_NONE,
pypetconstants.WRAP_MODE_NETLOCK,
pypetconstants.WRAP_MODE_NETQUEUE):
raise ValueError('SCOOP mode only works with `LOCAL`, `NETLOCK` or '
'`NETQUEUE` wrap mode!')
if niceness is not None and not hasattr(os, 'nice') and psutil is None:
raise ValueError('You cannot set `niceness` if your operating system does not '
'support the `nice` operation. Alternatively you can install '
'`psutil`.')
if freeze_input and not use_pool and not use_scoop:
raise ValueError('You can only use `freeze_input=True` if you either use '
'a pool or SCOOP.')
if not isinstance(memory_cap, tuple):
memory_cap = (memory_cap, 0.0)
if (cpu_cap <= 0.0 or cpu_cap > 100.0 or
memory_cap[0] <= 0.0 or memory_cap[0] > 100.0 or
swap_cap <= 0.0 or swap_cap > 100.0):
raise ValueError('Please choose cap values larger than 0.0 '
'and smaller or equal to 100.0.')
check_usage = cpu_cap < 100.0 or memory_cap[0] < 100.0 or swap_cap < 100.0
if check_usage and psutil is None:
raise ValueError('You cannot enable monitoring without having '
'installed psutil. Please install psutil or set '
'cpu_cap, memory_cap, and swap_cap to 100.0')
if ncores == 0 and psutil is None:
raise ValueError('You cannot set `ncores=0` for auto detection of CPUs if you have not '
'installed psutil. Please install psutil or '
'set `ncores` manually.')
if port is not None and wrap_mode not in (pypetconstants.WRAP_MODE_NETLOCK,
pypetconstants.WRAP_MODE_NETQUEUE):
raise ValueError('You can only specify a port for the `NETLOCK` or `NETQUEUE` wrapping.')
if use_scoop and graceful_exit:
raise ValueError('You cannot exit gracefully using SCOOP.')
unused_kwargs = set(kwargs.keys())
self._logging_manager = LoggingManager(log_config=log_config,
log_stdout=log_stdout,
report_progress=report_progress)
self._logging_manager.check_log_config()
self._logging_manager.add_null_handler()
self._set_logger()
self._map_arguments = False
self._stop_iteration = False # Marker to cancel
# iteration in case of Keyboard interrupt
self._graceful_exit = graceful_exit
# Helper attributes defined later on
self._start_timestamp = None
self._finish_timestamp = None
self._runtime = None
self._cpu_cap = cpu_cap
self._memory_cap = memory_cap
if psutil is not None:
# Total memory in MB
self._total_memory = psutil.virtual_memory().total / 1024.0 / 1024.0
# Estimated memory needed by each process as ratio
self._est_per_process = self._memory_cap[1] / self._total_memory * 100.0
self._swap_cap = swap_cap
self._check_usage = check_usage
self._last_cpu_check = 0.0
self._last_cpu_usage = 0.0
if self._check_usage:
# For initialisation
self._estimate_cpu_utilization()
self._niceness = niceness
self._sumatra_project = sumatra_project
self._sumatra_reason = sumatra_reason
self._sumatra_label = sumatra_label
self._loaded_sumatatra_project = None
self._sumatra_record = None
self._runfunc = None
self._args = ()
self._kwargs = {}
self._postproc = None
self._postproc_args = ()
self._postproc_kwargs = {}
self._immediate_postproc = immediate_postproc
self._user_pipeline = False
self._git_repository = git_repository
self._git_message = git_message
self._git_fail = git_fail
# Check if a novel trajectory needs to be created.
if isinstance(trajectory, compat.base_type):
# Create a new trajectory
self._traj = Trajectory(trajectory,
add_time=add_time,
dynamic_imports=dynamic_imports,
wildcard_functions=wildcard_functions,
comment=comment)
self._timestamp = self.trajectory.v_timestamp # Timestamp of creation
self._time = self.trajectory.v_time # Formatted timestamp
else:
self._traj = trajectory
# If no new trajectory is created the time of the environment differs
# from the trajectory and must be computed from the current time.
init_time = time.time()
formatted_time = format_time(init_time)
self._timestamp = init_time
self._time = formatted_time
# In case the user provided a git repository path, a git commit is performed
# and the environment's hexsha is taken from the commit if the commit was triggered by
# this particular environment, otherwise a new one is generated
if self._git_repository is not None:
new_commit, self._hexsha = make_git_commit(self, self._git_repository,
self._git_message, self._git_fail)
# Identifier hexsha
else:
new_commit = False
if not new_commit:
# Otherwise we need to create a novel hexsha
self._hexsha = hashlib.sha1(compat.tobytes(self.trajectory.v_name +
str(self.trajectory.v_timestamp) +
str(self.timestamp) +
VERSION)).hexdigest()
# Create the name of the environment
short_hexsha = self._hexsha[0:7]
name = 'environment'
self._name = name + '_' + str(short_hexsha) + '_' + self._time # Name of environment
# The trajectory should know the hexsha of the current environment.
# Thus, for all runs, one can identify by which environment they were run.
self._traj._environment_hexsha = self._hexsha
self._traj._environment_name = self._name
self._logging_manager.extract_replacements(self._traj)
self._logging_manager.remove_null_handler()
self._logging_manager.make_logging_handlers_and_tools()
# Drop a message if we made a commit. We cannot drop the message directly after the
# commit, because the logging files do not exist yet,
# and we want this commit to be tracked
if self._git_repository is not None:
if new_commit:
self._logger.info('Triggered NEW GIT commit `%s`.' % str(self._hexsha))
else:
self._logger.info('No changes detected, added PREVIOUS GIT commit `%s`.' %
str(self._hexsha))
# Create the storage service
if storage_service is True: # to allow compatibility with older pypet versions, i.e. the old
# keyword use_hdf5
storage_service = HDF5StorageService
if self._traj.v_storage_service is not None:
# Use the service of the trajectory
self._logger.info('Found storage service attached to Trajectory. Will use '
'this storage service.')
self._storage_service = self.trajectory.v_storage_service
else:
# Create a new service
self._storage_service, unused_factory_kwargs = storage_factory(storage_service,
self._traj, **kwargs)
unused_kwargs = unused_kwargs - (set(kwargs.keys()) - unused_factory_kwargs)
if lazy_debug and is_debug():
self._storage_service = LazyStorageService()
self._traj.v_storage_service = self._storage_service
# Create resume path if desired
self._resumable = resumable
if self._resumable:
if resume_folder is None:
resume_folder = os.path.join(os.getcwd(), 'resume')
resume_path = os.path.join(resume_folder, self._traj.v_name)
else:
resume_path = None
self._resume_folder = resume_folder
self._resume_path = resume_path
self._delete_resume = delete_resume
# Check multiproc
self._multiproc = multiproc
if ncores == 0:
# Let *pypet* detect CPU count via psutil
ncores = psutil.cpu_count()
self._logger.info('Determined CPUs automatically, found `%d` cores.' % ncores)
self._ncores = ncores
if queue_maxsize == -1:
# Educated guess of queue size
queue_maxsize = 2 * ncores
self._queue_maxsize = queue_maxsize
if wrap_mode is None:
# None cannot be used in HDF5 files, accordingly we need a string representation
wrap_mode = pypetconstants.WRAP_MODE_NONE
self._wrap_mode = wrap_mode
# Whether to use a pool of processes
self._use_pool = use_pool
self._use_scoop = use_scoop
self._freeze_input = freeze_input
self._gc_interval = gc_interval
self._multiproc_wrapper = None # The wrapper Service
self._do_single_runs = do_single_runs
self._automatic_storing = automatic_storing
self._clean_up_runs = clean_up_runs
if (wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and
not isinstance(port, compat.base_type)):
url = port_to_tcp(port)
self._logger.info('Determined lock-server URL automatically, it is `%s`.' % url)
else:
url = port
self._url = url
self._timeout = timeout
# self._deep_copy_data = False # deep_copy_data # For future reference deep_copy_arguments
# Notify that in case of lazy debugging we won't record anything
if lazy_debug and is_debug():
self._logger.warning('Using the LazyStorageService, nothing will be saved to disk.')
# Current run index to avoid quadratic runtime complexity in case of re-running
self._current_idx = 0
self._trajectory_name = self._traj.v_name
for kwarg in list(unused_kwargs):
try:
val = kwargs[kwarg]
self._traj.f_set_properties(**{kwarg: val})
self._logger.info('Set trajectory property `%s` to `%s`.' % (kwarg, str(val)))
unused_kwargs.remove(kwarg)
except AttributeError:
pass
if len(unused_kwargs) > 0:
raise ValueError('You passed keyword arguments to the environment that were '
'not used. The following keyword arguments were ignored: '
'`%s`' % str(unused_kwargs))
# Add all config data to the environment
self._add_config()
self._logger.info('Environment initialized.')
def _add_config(self):
# Add config data to the trajectory
if self._do_single_runs:
# Only add parameters if we actually want single runs to be performed
config_name = 'environment.%s.multiproc' % self.name
self._traj.f_add_config(Parameter, config_name, self._multiproc,
comment='Whether or not to use multiprocessing.').f_lock()
if self._multiproc:
config_name = 'environment.%s.use_pool' % self.name
self._traj.f_add_config(Parameter, config_name, self._use_pool,
comment='Whether to use a pool of processes or '
'spawning individual processes for '
'each run.').f_lock()
config_name = 'environment.%s.use_scoop' % self.name
self._traj.f_add_config(Parameter, config_name, self._use_scoop,
comment='Whether to use scoop to launch single '
'runs').f_lock()
if self._niceness is not None:
config_name = 'environment.%s.niceness' % self.name
self._traj.f_add_config(Parameter, config_name, self._niceness,
comment='Niceness value of child processes.').f_lock()
if self._use_pool:
config_name = 'environment.%s.freeze_input' % self.name
self._traj.f_add_config(Parameter, config_name, self._freeze_input,
comment='If inputs to each run are static and '
'are not mutated during each run, '
'can speed up pool running.').f_lock()
elif self._use_scoop:
pass
else:
config_name = 'environment.%s.cpu_cap' % self.name
self._traj.f_add_config(Parameter, config_name, self._cpu_cap,
comment='Maximum cpu usage beyond '
'which no new processes '
'are spawned.').f_lock()
config_name = 'environment.%s.memory_cap' % self.name
self._traj.f_add_config(Parameter, config_name, self._memory_cap,
comment='Tuple, first entry: Maximum RAM usage beyond '
'which no new processes are spawned; '
'second entry: Estimated usage per '
'process in MB. 0 if not estimated.').f_lock()
config_name = 'environment.%s.swap_cap' % self.name
self._traj.f_add_config(Parameter, config_name, self._swap_cap,
comment='Maximum Swap memory usage beyond '
'which no new '
'processes are spawned').f_lock()
config_name = 'environment.%s.immediate_postprocessing' % self.name
self._traj.f_add_config(Parameter, config_name, self._immediate_postproc,
comment='Whether to use immediate '
'postprocessing.').f_lock()
config_name = 'environment.%s.ncores' % self.name
self._traj.f_add_config(Parameter, config_name, self._ncores,
comment='Number of processors in case of '
'multiprocessing').f_lock()
config_name = 'environment.%s.wrap_mode' % self.name
self._traj.f_add_config(Parameter, config_name, self._wrap_mode,
comment='Multiprocessing mode (if multiproc),'
' i.e. whether to use QUEUE'
' or LOCK or NONE'
' for thread/process safe storing').f_lock()
if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
self._wrap_mode == pypetconstants.WRAP_MODE_PIPE):
config_name = 'environment.%s.queue_maxsize' % self.name
self._traj.f_add_config(Parameter, config_name, self._queue_maxsize,
comment='Maximum size of Storage Queue/Pipe in case of '
'multiprocessing and QUEUE/PIPE wrapping').f_lock()
if self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK:
config_name = 'environment.%s.url' % self.name
self._traj.f_add_config(Parameter, config_name, self._url,
comment='URL of lock distribution server, including '
'protocol and port.').f_lock()
if self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK or self._use_scoop:
config_name = 'environment.%s.timeout' % self.name
timeout = self._timeout
if timeout is None:
timeout = -1.0
self._traj.f_add_config(Parameter, config_name, timeout,
comment='Timeout for scoop and NETLOCK, '
'-1.0 means no timeout.').f_lock()
if (self._gc_interval and
(self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL or
self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
self._wrap_mode == pypetconstants.WRAP_MODE_PIPE)):
config_name = 'environment.%s.gc_interval' % self.name
self._traj.f_add_config(Parameter, config_name, self._gc_interval,
comment='Intervals with which ``gc.collect()`` '
'is called.').f_lock()
config_name = 'environment.%s.clean_up_runs' % self._name
self._traj.f_add_config(Parameter, config_name, self._clean_up_runs,
comment='Whether or not results should be removed after the '
'completion of a single run. '
'You are not advised to set this '
'to `False`. Only do it if you know what you are '
'doing.').f_lock()
config_name = 'environment.%s.resumable' % self._name
self._traj.f_add_config(Parameter, config_name, self._resumable,
comment='Whether or not resume files should '
'be created. If yes, everything is '
'handled by `dill`.').f_lock()
config_name = 'environment.%s.graceful_exit' % self._name
self._traj.f_add_config(Parameter, config_name, self._graceful_exit,
comment='Whether or not to allow graceful handling '
'of `SIGINT` (`CTRL+C`).').f_lock()
config_name = 'environment.%s.trajectory.name' % self.name
self._traj.f_add_config(Parameter, config_name, self.trajectory.v_name,
comment='Name of trajectory').f_lock()
config_name = 'environment.%s.trajectory.timestamp' % self.name
self._traj.f_add_config(Parameter, config_name, self.trajectory.v_timestamp,
comment='Timestamp of trajectory').f_lock()
config_name = 'environment.%s.timestamp' % self.name
self._traj.f_add_config(Parameter, config_name, self.timestamp,
comment='Timestamp of environment creation').f_lock()
config_name = 'environment.%s.hexsha' % self.name
self._traj.f_add_config(Parameter, config_name, self.hexsha,
comment='SHA-1 identifier of the environment').f_lock()
config_name = 'environment.%s.automatic_storing' % self.name
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, self._automatic_storing,
comment='If trajectory should be stored automatically in the '
'end.').f_lock()
try:
config_name = 'environment.%s.script' % self.name
self._traj.f_add_config(Parameter, config_name, main.__file__,
comment='Name of the executed main script').f_lock()
except AttributeError:
pass # We end up here if we use pypet within an ipython console
for package_name, version in pypetconstants.VERSIONS_TO_STORE.items():
config_name = 'environment.%s.versions.%s' % (self.name, package_name)
self._traj.f_add_config(Parameter, config_name, version,
comment='Particular version of a package or distribution '
'used during experiment. N/A if package could not '
'be imported.').f_lock()
self._traj.config.environment.v_comment = 'Settings for the different environments ' \
'used to run the experiments'
def __repr__(self):
"""String representation of environment"""
repr_string = '<%s %s for Trajectory %s>' % (self.__class__.__name__, self.name,
self.trajectory.v_name)
return repr_string
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable_logging()
def disable_logging(self, remove_all_handlers=True):
"""Removes all logging handlers and stops logging to files and logging stdout.
:param remove_all_handlers:
If `True` all logging handlers are removed.
If you want to keep the handlers, set this to `False`.
"""
self._logging_manager.finalize(remove_all_handlers)
@kwargs_api_change('continue_folder', 'resume_folder')
def resume(self, trajectory_name=None, resume_folder=None):
"""Resumes crashed trajectories.
:param trajectory_name:
Name of trajectory to resume, if not specified the name passed to the environment
is used. Be aware that if `add_time=True` the name you passed to the environment is
altered and the current date is added.
:param resume_folder:
The folder where resume files can be found. Do not pass the name of the sub-folder
with the trajectory name, but the name of the parent folder.
If not specified the resume folder passed to the environment is used.
:return:
List of the individual results returned by your run function.
Returns a LIST OF TUPLES, where the first entry is the run idx and the second entry
is the actual result. In case of multiprocessing these are not necessarily
ordered according to their run index, but ordered according to their finishing time.
Does not contain results stored in the trajectory!
In order to access these simply interact with the trajectory object,
potentially after calling :func:`~pypet.trajectory.Trajectory.f_update_skeleton`
and loading all results at once with :func:`~pypet.trajectory.f_load`
or loading manually with :func:`~pypet.trajectory.f_load_items`.
Even if you use multiprocessing without a pool the results returned by
`runfunc` still need to be pickled.
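For illustration, a minimal sketch; the trajectory name, run function, and resume
folder are placeholders, and the environment must have been created with
``resumable=True``::

    env = Environment(trajectory='my_traj', resumable=True,
                      resume_folder='./resume/')
    # ... a previous call to ``env.run(my_run)`` crashed ...
    results = env.resume(trajectory_name='my_traj',
                         resume_folder='./resume/')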
"""
if trajectory_name is None:
self._trajectory_name = self.trajectory.v_name
else:
self._trajectory_name = trajectory_name
if resume_folder is not None:
self._resume_folder = resume_folder
return self._execute_runs(None)
@property
def trajectory(self):
""" The trajectory of the Environment"""
return self._traj
@property
def traj(self):
""" Equivalent to env.trajectory"""
return self.trajectory
@property
def current_idx(self):
"""The current run index that is the next one to be executed.
Can be set manually to make the environment consider old non-completed ones.
"""
return self._current_idx
@current_idx.setter
def current_idx(self, idx):
self._current_idx = idx
@property
def hexsha(self):
"""The SHA1 identifier of the environment.
It is identical to the SHA1 of the git commit.
If version control is not used, the environment hash is computed from the
trajectory name, the current timestamp and your current *pypet* version."""
return self._hexsha
@property
def time(self):
""" Time of the creation of the environment, human readable."""
return self._time
@property
def timestamp(self):
"""Time of creation as python datetime float"""
return self._timestamp
@property
def name(self):
""" Name of the Environment"""
return self._name
def add_postprocessing(self, postproc, *args, **kwargs):
""" Adds a post processing function.
The environment will call this function via
``postproc(traj, result_list, *args, **kwargs)`` after the completion of the
single runs.
This function can load parts of the trajectory if needed and add additional results.
Moreover, the function can be used to trigger an expansion of the trajectory.
This can be useful if the user has an `optimization` task.
Either the function calls `f_expand` directly on the trajectory or returns
a dictionary. In the latter case, `f_expand` is called by the environment.
Note that after expansion of the trajectory, the post-processing function is called
again (and again for further expansions). Thus, this allows an iterative approach
to parameter exploration.
Note that in case post-processing is called after all runs have been executed,
the storage service of the trajectory is no longer multiprocessing safe.
If you want to use multiprocessing in your post-processing you can still
manually wrap the storage service with the :class:`~pypet.environment.MultiprocessWrapper`.
In case you use **immediate** postprocessing, the storage service of your trajectory
is still multiprocessing safe (except when using the wrap_mode ``'LOCAL'``).
Accordingly, you could even use multiprocessing in your immediate post-processing phase
if you dare, like using a multiprocessing pool_, for instance.
You can easily check in your post-processing function if the storage service is
multiprocessing safe via the ``multiproc_safe`` attribute, i.e.
``traj.v_storage_service.multiproc_safe``.
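For illustration, a minimal sketch of a post-processing function that keeps
expanding an already explored parameter ``x``; the parameter name, the expansion
rule, and ``max_runs`` are only placeholders::

    def my_postproc(traj, result_list, max_runs=10):
        # Returning a dictionary triggers `f_expand` on the trajectory,
        # which causes this post-processing function to be called again
        if len(traj) < max_runs:
            return {'x': [float(len(traj))]}

    env.add_postprocessing(my_postproc, max_runs=10)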
:param postproc:
The post processing function
:param args:
Additional arguments passed to the post-processing function
:param kwargs:
Additional keyword arguments passed to the postprocessing function
:return:
"""
self._postproc = postproc
self._postproc_args = args
self._postproc_kwargs = kwargs
def pipeline(self, pipeline):
""" You can make *pypet* supervise your whole experiment by defining a pipeline.
`pipeline` is a function that defines the entire experiment. From pre-processing
including setting up the trajectory over defining the actual simulation runs to
post processing.
The `pipeline` function needs to return TWO tuples with a maximum of three entries each.
For example:
::
return (runfunc, args, kwargs), (postproc, postproc_args, postproc_kwargs)
Where `runfunc` is the actual simulation function that gets passed the trajectory
container and potentially additional arguments `args` and keyword arguments `kwargs`.
This will be run by your environment with all parameter combinations.
`postproc` is a post processing function that handles your computed results.
The function must accept as arguments the trajectory container, a list of
results (list of tuples (run idx, result) ) and potentially
additional arguments `postproc_args` and keyword arguments `postproc_kwargs`.
As for :func:`~pypet.environment.Environment.f_add_postproc`, this function can
potentially extend the trajectory.
If you don't want to apply post-processing, your pipeline function can also simply
return the run function and the arguments:
::
return runfunc, args, kwargs
Or
::
return runfunc, args
Or
::
return runfunc
``return runfunc, kwargs`` does NOT work, if you don't want to pass `args` do
``return runfunc, (), kwargs``.
Analogously combinations like
::
return (runfunc, args), (postproc,)
work as well.
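For illustration, a minimal sketch of a complete pipeline, where ``my_run`` and
``my_postproc`` are placeholder functions as described above and the parameter
``x`` is arbitrary::

    def my_pipeline(traj):
        # Pre-processing: define and explore the parameters
        traj.f_add_parameter('x', 1.0)
        traj.f_explore({'x': [1.0, 2.0, 3.0]})
        return (my_run, (), {}), (my_postproc, (), {})

    results = env.pipeline(my_pipeline)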
:param pipeline:
The pipeline function, taking only a single argument `traj` and returning all functions necessary for your experiment.
:return:
List of the individual results returned by `runfunc`.
Returns a LIST OF TUPLES, where the first entry is the run idx and the second entry
is the actual result. In case of multiprocessing these are not necessarily
ordered according to their run index, but ordered according to their finishing time.
Does not contain results stored in the trajectory!
In order to access these simply interact with the trajectory object,
potentially after calling :func:`~pypet.trajectory.Trajectory.f_update_skeleton`
and loading all results at once with :func:`~pypet.trajectory.f_load`
or loading manually with :func:`~pypet.trajectory.f_load_items`.
Even if you use multiprocessing without a pool the results returned by
`runfunc` still need to be pickled.
Results computed from `postproc` are not returned. `postproc` should not
return any results except dictionaries if the trajectory should be expanded.
"""
self._user_pipeline = True
self._map_arguments = False
return self._execute_runs(pipeline)
def pipeline_map(self, pipeline):
"""Creates a pipeline with iterable arguments"""
self._user_pipeline = True
self._map_arguments = True
return self._execute_runs(pipeline)
def run(self, runfunc, *args, **kwargs):
""" Runs the experiments and explores the parameter space.
:param runfunc: The task or job to do
:param args: Additional arguments (not the ones in the trajectory) passed to `runfunc`
:param kwargs:
Additional keyword arguments (not the ones in the trajectory) passed to `runfunc`
:return:
List of the individual results returned by `runfunc`.
Returns a LIST OF TUPLES, where the first entry is the run idx and the second entry
is the actual result. They are always ordered according to the run index.
Does not contain results stored in the trajectory!
In order to access these simply interact with the trajectory object,
potentially after calling :func:`~pypet.trajectory.Trajectory.f_update_skeleton`
and loading all results at once with :func:`~pypet.trajectory.f_load`
or loading manually with :func:`~pypet.trajectory.f_load_items`.
If you use multiprocessing without a pool the results returned by
`runfunc` still need to be pickled.
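For illustration, a minimal, self-contained sketch; the parameter and result
names are arbitrary::

    from pypet import Environment

    def multiply(traj):
        z = traj.x * traj.y
        traj.f_add_result('z', z, comment='Product of x and y')

    env = Environment(trajectory='multiplication', add_time=False)
    traj = env.trajectory
    traj.f_add_parameter('x', 1.0)
    traj.f_add_parameter('y', 2.0)
    traj.f_explore({'x': [1.0, 2.0], 'y': [3.0, 4.0]})
    results = env.run(multiply)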
"""
pipeline = lambda traj: ((runfunc, args, kwargs),
(self._postproc, self._postproc_args, self._postproc_kwargs))
self._user_pipeline = False
self._map_arguments = False
return self._execute_runs(pipeline)
def run_map(self, runfunc, *iter_args, **iter_kwargs):
"""Calls runfunc with different args and kwargs each time.
Similar to :func:`~pypet.environment.Environment.run`
but all ``iter_args`` and ``iter_kwargs`` need to be iterables,
iterators, or generators that return new arguments for each run.
"""
if len(iter_args) == 0 and len(iter_kwargs) == 0:
raise ValueError('Use `run` if you don`t have any other arguments.')
pipeline = lambda traj: ((runfunc, iter_args, iter_kwargs),
(self._postproc, self._postproc_args, self._postproc_kwargs))
self._user_pipeline = False
self._map_arguments = True
return self._execute_runs(pipeline)
def _trigger_resume_snapshot(self):
""" Makes the trajectory continuable in case the user wants that"""
dump_dict = {}
dump_filename = os.path.join(self._resume_path, 'environment.ecnt')
# Store the trajectory before the first runs
prev_full_copy = self._traj.v_full_copy
dump_dict['full_copy'] = prev_full_copy
self._traj.v_full_copy = True
prev_storage_service = self._traj.v_storage_service
self._traj.v_storage_service = self._storage_service
dump_dict['trajectory'] = self._traj
dump_dict['args'] = self._args
dump_dict['kwargs'] = self._kwargs
dump_dict['runfunc'] = self._runfunc
dump_dict['postproc'] = self._postproc
dump_dict['postproc_args'] = self._postproc_args
dump_dict['postproc_kwargs'] = self._postproc_kwargs
dump_dict['start_timestamp'] = self._start_timestamp
dump_file = open(dump_filename, 'wb')
dill.dump(dump_dict, dump_file, protocol=2)
dump_file.flush()
dump_file.close()
self._traj.v_full_copy = prev_full_copy
self._traj.v_storage_service = prev_storage_service
def _prepare_sumatra(self):
""" Prepares a sumatra record """
reason = self._sumatra_reason
if reason:
reason += ' -- '
if self._traj.v_comment:
commentstr = ' (`%s`)' % self._traj.v_comment
else:
commentstr = ''
reason += 'Trajectory %s%s -- Explored Parameters: %s' % \
(self._traj.v_name,
commentstr,
str(compat.listkeys(self._traj._explored_parameters)))
self._logger.info('Preparing sumatra record with reason: %s' % reason)
self._sumatra_reason = reason
self._loaded_sumatatra_project = load_project(self._sumatra_project)
if self._traj.f_contains('parameters', shortcuts=False):
param_dict = self._traj.parameters.f_to_dict(fast_access=False)
for param_name in compat.listkeys(param_dict):
param = param_dict[param_name]
if param.f_has_range():
param_dict[param_name] = param.f_get_range()
else:
param_dict[param_name] = param.f_get()
else:
param_dict = {}
relpath = os.path.relpath(sys.modules['__main__'].__file__, self._sumatra_project)
executable = PythonExecutable(path=sys.executable)
self._sumatra_record = self._loaded_sumatatra_project.new_record(
parameters=param_dict,
main_file=relpath,
executable=executable,
label=self._sumatra_label,
reason=reason)
def _finish_sumatra(self):
""" Saves a sumatra record """
finish_time = self._finish_timestamp - self._start_timestamp
self._sumatra_record.duration = finish_time
self._sumatra_record.output_data = self._sumatra_record.datastore.find_new_data(self._sumatra_record.timestamp)
self._loaded_sumatatra_project.add_record(self._sumatra_record)
self._loaded_sumatatra_project.save()
sumatra_label = self._sumatra_record.label
config_name = 'sumatra.record_%s.label' % str(sumatra_label)
conf_list = []
if not self._traj.f_contains('config.' + config_name):
conf1 = self._traj.f_add_config(Parameter, config_name, str(sumatra_label),
comment='The label of the sumatra record')
conf_list.append(conf1)
if self._sumatra_reason:
config_name = 'sumatra.record_%s.reason' % str(sumatra_label)
if not self._traj.f_contains('config.' + config_name):
conf2 = self._traj.f_add_config(Parameter, config_name,
str(self._sumatra_reason),
comment='Reason of sumatra run.')
conf_list.append(conf2)
if self._automatic_storing and conf_list:
self._traj.f_store_items(conf_list)
self._logger.info('Saved sumatra project record with reason: '
'%s' % str(self._sumatra_reason))
def _prepare_resume(self):
""" Prepares the continuation of a crashed trajectory """
if not self._resumable:
raise RuntimeError('If you create an environment to resume a run, you need to '
'set `resumable=True`.')
if not self._do_single_runs:
raise RuntimeError('You cannot resume a run if you did create an environment '
'with `do_single_runs=False`.')
self._resume_path = os.path.join(self._resume_folder, self._trajectory_name)
cnt_filename = os.path.join(self._resume_path, 'environment.ecnt')
cnt_file = open(cnt_filename, 'rb')
resume_dict = dill.load(cnt_file)
cnt_file.close()
traj = resume_dict['trajectory']
# We need to update the information about the trajectory name
config_name = 'config.environment.%s.trajectory.name' % self.name
if self._traj.f_contains(config_name, shortcuts=False):
param = self._traj.f_get(config_name, shortcuts=False)
param.f_unlock()
param.f_set(traj.v_name)
param.f_lock()
config_name = 'config.environment.%s.trajectory.timestamp' % self.name
if self._traj.f_contains(config_name, shortcuts=False):
param = self._traj.f_get(config_name, shortcuts=False)
param.f_unlock()
param.f_set(traj.v_timestamp)
param.f_lock()
# Merge the information so that we keep a record about the current environment
if not traj.config.environment.f_contains(self.name, shortcuts=False):
traj._merge_config(self._traj)
self._traj = traj
# User's job function
self._runfunc = resume_dict['runfunc']
# Arguments to the user's job function
self._args = resume_dict['args']
# Keyword arguments to the user's job function
self._kwargs = resume_dict['kwargs']
# Postproc Function
self._postproc = resume_dict['postproc']
# Postprog args
self._postproc_args = resume_dict['postproc_args']
# Postproc Kwargs
self._postproc_kwargs = resume_dict['postproc_kwargs']
# Unpack the trajectory
self._traj.v_full_copy = resume_dict['full_copy']
# Load meta data
self._traj.f_load(load_parameters=pypetconstants.LOAD_NOTHING,
load_derived_parameters=pypetconstants.LOAD_NOTHING,
load_results=pypetconstants.LOAD_NOTHING,
load_other_data=pypetconstants.LOAD_NOTHING)
# Now we have to reconstruct previous results
result_list = []
full_filename_list = []
for filename in os.listdir(self._resume_path):
_, ext = os.path.splitext(filename)
if ext != '.rcnt':
continue
full_filename = os.path.join(self._resume_path, filename)
cnt_file = open(full_filename, 'rb')
result_list.append(dill.load(cnt_file))
cnt_file.close()
full_filename_list.append(full_filename)
new_result_list = []
for result_tuple in result_list:
run_information = result_tuple[1]
self._traj._update_run_information(run_information)
new_result_list.append(result_tuple[0])
result_sort(new_result_list)
# Add a config parameter signalling that an experiment was resumed
config_name = 'environment.%s.resumed' % self.name
if config_name not in self._traj:
self._traj.f_add_config(Parameter, config_name, True,
comment='Added if a crashed trajectory was continued.')
self._logger.info('I will resume trajectory `%s`.' % self._traj.v_name)
return new_result_list
def _prepare_runs(self, pipeline):
"""Prepares the running of an experiment
:param pipeline:
A pipeline function that defines the task
"""
pip_result = pipeline(self._traj) # Call the pipeline function
# Extract the task to do from the pipeline result
raise_error = False
if pip_result is None:
if self._do_single_runs:
raise RuntimeError('Your pipeline function did return `None`. '
'Accordingly, I assume you just do data analysis. '
'Please create an environment with `do_single_runs=False`.')
self._logger.info('Your pipeline returned no runfunction, I assume you do some '
'sort of data analysis and will skip any single run execution.')
self._runfunc = None
return
elif (len(pip_result) == 2 and
isinstance(pip_result[0], tuple) and
isinstance(pip_result[1], tuple)):
# Extract the run and post-processing functions and arguments
run_tuple = pip_result[0]
self._runfunc = run_tuple[0]
if len(run_tuple) > 1:
self._args = run_tuple[1]
if len(run_tuple) > 2:
self._kwargs = run_tuple[2]
if len(run_tuple) > 3:
raise_error = True
postproc_tuple = pip_result[1]
if len(postproc_tuple) > 0:
self._postproc = postproc_tuple[0]
if len(postproc_tuple) > 1:
self._postproc_args = postproc_tuple[1]
if len(postproc_tuple) > 2:
self._postproc_kwargs = postproc_tuple[2]
if len(run_tuple) > 3:
raise_error = True
elif len(pip_result) <= 3:
self._runfunc = pip_result[0]
if len(pip_result) > 1:
self._args = pip_result[1]
if len(pip_result) > 2:
self._kwargs = pip_result[2]
else:
raise_error = True
if raise_error:
raise RuntimeError('Your pipeline result is not understood, please return '
'a tuple of maximum length 3: ``(runfunc, args, kwargs)`` '
'or return two tuples of maximum length 3: '
'``(runfunc, args, kwargs), '
'(postproc, postproc_args, postproc_kwargs)``')
if self._runfunc is not None and not self._do_single_runs:
raise RuntimeError('You cannot make a run if you did create an environment '
'with `do_single_runs=False`.')
if self._resumable:
racedirs(self._resume_path)
if os.listdir(self._resume_path):
raise RuntimeError('Your resume folder `%s` needs '
'to be empty to allow continuing!' % self._resume_path)
if self._user_pipeline:
self._logger.info('\n************************************************************\n'
'STARTING PREPROCESSING for trajectory\n`%s`'
'\n************************************************************\n' %
self._traj.v_name)
# Make some preparations (locking of parameters etc) and store the trajectory
self._logger.info('I am preparing the Trajectory for the experiment and '
'will initialise the store.')
self._traj._prepare_experiment()
self._logger.info('Initialising the storage for the trajectory.')
self._traj.f_store(only_init=True)
def _show_progress(self, n, total_runs):
"""Displays a progressbar"""
self._logging_manager.show_progress(n, total_runs)
def _make_kwargs(self, **kwargs):
"""Creates the keyword arguments for the single run handling"""
result_dict = {'traj': self._traj,
'logging_manager': self._logging_manager,
'runfunc': self._runfunc,
'runargs': self._args,
'runkwargs': self._kwargs,
'clean_up_runs': self._clean_up_runs,
'automatic_storing': self._automatic_storing,
'wrap_mode': self._wrap_mode,
'niceness': self._niceness,
'graceful_exit': self._graceful_exit}
result_dict.update(kwargs)
if self._multiproc:
if self._use_pool or self._use_scoop:
if self._use_scoop:
del result_dict['graceful_exit']
if self._freeze_input:
# Remember the full copy setting for the frozen input to
# change this back once the trajectory is received by
# each process
result_dict['full_copy'] = self.traj.v_full_copy
if self._map_arguments:
del result_dict['runargs']
del result_dict['runkwargs']
else:
result_dict['clean_up_runs'] = False
if self._use_pool:
# Needs only be deleted in case of using a pool but necessary for scoop
del result_dict['logging_manager']
del result_dict['niceness']
else:
result_dict['clean_up_runs'] = False
return result_dict
def _make_index_iterator(self, start_run_idx):
"""Returns an iterator over the run indices that are not completed"""
total_runs = len(self._traj)
for n in compat.xrange(start_run_idx, total_runs):
self._current_idx = n + 1
if self._stop_iteration:
self._logger.debug('I am stopping new run iterations now!')
break
if not self._traj._is_completed(n):
self._traj.f_set_crun(n)
yield n
else:
self._logger.debug('Run `%d` has already been completed, I am skipping it.' % n)
def _make_iterator(self, start_run_idx, copy_data=False, **kwargs):
""" Returns an iterator over all runs and yields the keyword arguments """
if (not self._freeze_input) or (not self._multiproc):
kwargs = self._make_kwargs(**kwargs)
def _do_iter():
if self._map_arguments:
self._args = tuple(iter(arg) for arg in self._args)
for key in compat.listkeys(self._kwargs):
self._kwargs[key] = iter(self._kwargs[key])
for idx in self._make_index_iterator(start_run_idx):
iter_args = tuple(next(x) for x in self._args)
iter_kwargs = {}
for key in self._kwargs:
iter_kwargs[key] = next(self._kwargs[key])
kwargs['runargs'] = iter_args
kwargs['runkwargs'] = iter_kwargs
if self._freeze_input:
# Frozen pool needs current run index
kwargs['idx'] = idx
if copy_data:
copied_kwargs = kwargs.copy()
if not self._freeze_input:
copied_kwargs['traj'] = self._traj.f_copy(copy_leaves='explored',
with_links=True)
yield copied_kwargs
else:
yield kwargs
else:
for idx in self._make_index_iterator(start_run_idx):
if self._freeze_input:
# Frozen pool needs current run index
kwargs['idx'] = idx
if copy_data:
copied_kwargs = kwargs.copy()
if not self._freeze_input:
copied_kwargs['traj'] = self._traj.f_copy(copy_leaves='explored',
with_links=True)
yield copied_kwargs
else:
yield kwargs
return _do_iter()
def _execute_postproc(self, results):
""" Executes a postprocessing function
:param results:
List of tuples containing the run indices and the results
:return:
1. Whether to start new single runs, since the trajectory was enlarged
2. Index of next new run
3. Number of new runs
"""
repeat = False
start_run_idx = 0
new_runs = 0
# Do some finalization
self._traj._finalize(store_meta_data=True)
old_traj_length = len(self._traj)
postproc_res = self._postproc(self._traj, results,
*self._postproc_args, **self._postproc_kwargs)
if postproc_res is None:
pass
elif isinstance(postproc_res, dict):
if postproc_res:
self._traj.f_expand(postproc_res)
elif isinstance(postproc_res, tuple):
expand_dict = postproc_res[0]
if len(postproc_res) > 1:
self._args = postproc_res[1]
if len(postproc_res) > 2:
self._kwargs = postproc_res[2]
if len(postproc_res) > 3:
self._postproc_args = postproc_res[3]
if len(postproc_res) > 4:
self._postproc_kwargs = postproc_res[4]
if expand_dict:
self._traj.f_expand(expand_dict)
else:
self._logger.error('Your postproc result `%s` was not understood.' % str(postproc_res))
new_traj_length = len(self._traj)
if new_traj_length != old_traj_length:
start_run_idx = old_traj_length
repeat = True
if self._resumable:
self._logger.warning('Continuing a trajectory AND expanding it during runtime is '
'NOT supported properly, there is no guarantee that this '
'works!')
self._traj.f_store(only_init=True)
new_traj_length = len(self._traj)
new_runs = new_traj_length - old_traj_length
return repeat, start_run_idx, new_runs
def _estimate_cpu_utilization(self):
"""Estimates the cpu utilization within the last 500ms"""
now = time.time()
if now - self._last_cpu_check >= 0.5:
try:
self._last_cpu_usage = psutil.cpu_percent()
self._last_cpu_check = now
except (psutil.NoSuchProcess, ZeroDivisionError):
pass # psutil sometimes produces ZeroDivisionErrors; this has been fixed in newer
# versions, but we want to support older ones as well
return self._last_cpu_usage
def _estimate_memory_utilization(self, process_dict):
"""Estimates memory utilization to come if process was started"""
n_processes = len(process_dict)
total_utilization = psutil.virtual_memory().percent
mem_sum = 0.0
for proc in compat.itervalues(process_dict):
try:
mem_sum += psutil.Process(proc.pid).memory_percent()
except (psutil.NoSuchProcess, ZeroDivisionError):
pass
curr_all_processes = mem_sum
missing_utilization = max(0.0, n_processes * self._est_per_process - curr_all_processes)
estimated_utilization = total_utilization
estimated_utilization += missing_utilization
estimated_utilization += self._est_per_process
return estimated_utilization
def _execute_runs(self, pipeline):
""" Starts the individual single runs.
Starts runs sequentially or initiates multiprocessing.
:param pipeline:
A pipeline function producing the run function the corresponding arguments
and postprocessing function and arguments
:return:
List of tuples, where each tuple contains the run idx and the result.
"""
if self._start_timestamp is None:
self._start_timestamp = time.time()
if self._map_arguments and self._resumable:
raise ValueError('You cannot use `run_map` or `pipeline_map` in combination '
'with the resume option.')
if self._sumatra_project is not None:
self._prepare_sumatra()
if pipeline is not None:
results = []
self._prepare_runs(pipeline)
else:
results = self._prepare_resume()
if self._runfunc is not None:
self._traj._run_by_environment = True
if self._graceful_exit:
sigint_handling.start()
try:
self._inner_run_loop(results)
finally:
self._traj._run_by_environment = False
self._stop_iteration = False
if self._graceful_exit:
sigint_handling.finalize()
self._add_wildcard_config()
if self._automatic_storing:
self._logger.info('\n************************************************************\n'
'STARTING FINAL STORING of trajectory\n`%s`'
'\n************************************************************\n' %
self._traj.v_name)
self._traj.f_store()
self._logger.info('\n************************************************************\n'
'FINISHED FINAL STORING of trajectory\n`%s`.'
'\n************************************************************\n' %
self._traj.v_name)
self._finish_timestamp = time.time()
findatetime = datetime.datetime.fromtimestamp(self._finish_timestamp)
startdatetime = datetime.datetime.fromtimestamp(self._start_timestamp)
self._runtime = str(findatetime - startdatetime)
conf_list = []
config_name = 'environment.%s.start_timestamp' % self.name
if not self._traj.f_contains('config.' + config_name):
conf1 = self._traj.f_add_config(Parameter, config_name, self._start_timestamp,
comment='Timestamp of starting of experiment '
'(when the actual simulation was '
'started (either by calling `run`, '
'`resume`, or `pipeline`).')
conf_list.append(conf1)
config_name = 'environment.%s.finish_timestamp' % self.name
if not self._traj.f_contains('config.' + config_name):
conf2 = self._traj.f_add_config(Parameter, config_name, self._finish_timestamp,
comment='Timestamp of finishing of an experiment.')
else:
conf2 = self._traj.f_get('config.' + config_name)
conf2.f_unlock()
conf2.f_set(self._finish_timestamp)
conf_list.append(conf2)
config_name = 'environment.%s.runtime' % self.name
if not self._traj.f_contains('config.' + config_name):
conf3 = self._traj.f_add_config(Parameter, config_name, self._runtime,
comment='Runtime of whole experiment.')
else:
conf3 = self._traj.f_get('config.' + config_name)
conf3.f_unlock()
conf3.f_set(self._runtime)
conf_list.append(conf3)
if self._automatic_storing:
self._traj.f_store_items(conf_list, store_data=pypetconstants.OVERWRITE_DATA)
if hasattr(self._traj.v_storage_service, 'finalize'):
# Finalize the storage service if this is supported
self._traj.v_storage_service.finalize()
incomplete = []
for run_name in self._traj.f_get_run_names():
if not self._traj._is_completed(run_name):
incomplete.append(run_name)
if len(incomplete) > 0:
self._logger.error('Following runs of trajectory `%s` '
'did NOT complete: `%s`' % (self._traj.v_name,
', '.join(incomplete)))
else:
self._logger.info('All runs of trajectory `%s` were completed successfully.' %
self._traj.v_name)
if self._sumatra_project is not None:
self._finish_sumatra()
return results
def _add_wildcard_config(self):
"""Adds config data about the wildcard functions"""
for idx, pair in enumerate(self._traj._wildcard_functions.items()):
wildcards, wc_function = pair
for jdx, wildcard in enumerate(wildcards):
config_name = ('environment.%s.wildcards.function_%d.wildcard_%d' %
(self.name, idx, jdx))
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, wildcard,
comment='Wildcard symbol for the wildcard function').f_lock()
if hasattr(wc_function, '__name__'):
config_name = ('environment.%s.wildcards.function_%d.name' %
(self.name, idx))
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, wc_function.__name__,
comment='Name of wildcard function').f_lock()
if wc_function.__doc__:
config_name = ('environment.%s.wildcards.function_%d.doc' %
(self.name, idx))
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, wc_function.__doc__,
comment='Docstring of wildcard function').f_lock()
try:
source = inspect.getsource(wc_function)
config_name = ('environment.%s.wildcards.function_%d.source' %
(self.name, idx))
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, source,
comment='Source code of wildcard function').f_lock()
except Exception:
pass # We cannot find the source, just leave it
def _inner_run_loop(self, results):
"""Performs the inner loop of the run execution"""
start_run_idx = self._current_idx
expanded_by_postproc = False
self._storage_service = self._traj.v_storage_service
self._multiproc_wrapper = None
if self._resumable:
self._trigger_resume_snapshot()
self._logger.info(
'\n************************************************************\n'
'STARTING runs of trajectory\n`%s`.'
'\n************************************************************\n' %
self._traj.v_name)
while True:
if self._multiproc:
expanded_by_postproc = self._execute_multiprocessing(start_run_idx, results)
else:
# Create a generator to generate the tasks
iterator = self._make_iterator(start_run_idx)
n = start_run_idx
total_runs = len(self._traj)
# Signal start of progress calculation
self._show_progress(n - 1, total_runs)
for task in iterator:
result = _sigint_handling_single_run(task)
n = self._check_result_and_store_references(result, results,
n, total_runs)
repeat = False
if self._postproc is not None:
self._logger.info('Performing POSTPROCESSING')
repeat, start_run_idx, new_runs = self._execute_postproc(results)
if not repeat:
break
else:
expanded_by_postproc = True
self._logger.info('POSTPROCESSING expanded the trajectory and added %d new runs' %
new_runs)
# Do some finalization
self._traj._finalize(store_meta_data=True)
self._logger.info(
'\n************************************************************\n'
'FINISHED all runs of trajectory\n`%s`.'
'\n************************************************************\n' %
self._traj.v_name)
if self._resumable and self._delete_resume:
# We remove all resume files if the simulation was successfully completed
shutil.rmtree(self._resume_path)
if expanded_by_postproc:
config_name = 'environment.%s.postproc_expand' % self.name
if not self._traj.f_contains('config.' + config_name):
self._traj.f_add_config(Parameter, config_name, True,
comment='Added if trajectory was expanded '
'by postprocessing.')
def _get_results_from_queue(self, result_queue, results, n, total_runs):
"""Extract all available results from the queue and returns the increased n"""
# Get all results from the result queue
while not result_queue.empty():
result = result_queue.get()
n = self._check_result_and_store_references(result, results, n, total_runs)
return n
def _check_result_and_store_references(self, result, results, n, total_runs):
"""Checks for SIGINT and if reference wrapping and stores references."""
if result[0] == sigint_handling.SIGINT:
self._stop_iteration = True
result = result[1] # If SIGINT result is a nested tuple
if result is not None:
if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
self._multiproc_wrapper.store_references(result[2])
self._traj._update_run_information(result[1])
results.append(result[0])
if self._resumable:
# [0:2] to not store references
self._trigger_result_snapshot(result[0:2])
self._show_progress(n, total_runs)
n += 1
return n
def _trigger_result_snapshot(self, result):
""" Triggers a snapshot of the results for continuing
:param result: Currently computed result
"""
timestamp = result[1]['finish_timestamp']
timestamp_str = repr(timestamp).replace('.', '_')
filename = 'result_%s' % timestamp_str
extension = '.ncnt'
dump_filename = os.path.join(self._resume_path, filename + extension)
dump_file = open(dump_filename, 'wb')
dill.dump(result, dump_file, protocol=2)
dump_file.flush()
dump_file.close()
# We rename the file to be certain that the trajectory did not crash during taking
# the snapshot!
extension = '.rcnt'
rename_filename = os.path.join(self._resume_path, filename + extension)
shutil.move(dump_filename, rename_filename)
def _execute_multiprocessing(self, start_run_idx, results):
"""Performs multiprocessing and signals expansion by postproc"""
n = start_run_idx
total_runs = len(self._traj)
expanded_by_postproc = False
if (self._wrap_mode == pypetconstants.WRAP_MODE_NONE or
self._storage_service.multiproc_safe):
self._logger.info('I assume that your storage service is multiprocessing safe.')
else:
use_manager = (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
self._immediate_postproc)
self._multiproc_wrapper = MultiprocContext(self._traj,
self._wrap_mode,
full_copy=None,
manager=None,
use_manager=use_manager,
lock=None,
queue=None,
queue_maxsize=self._queue_maxsize,
port=self._url,
timeout=self._timeout,
gc_interval=self._gc_interval,
log_config=self._logging_manager.log_config,
log_stdout=self._logging_manager.log_stdout,
graceful_exit=self._graceful_exit)
self._multiproc_wrapper.start()
try:
if self._use_pool:
self._logger.info('Starting Pool with %d processes' % self._ncores)
if self._freeze_input:
self._logger.info('Freezing pool input')
init_kwargs = self._make_kwargs()
# To work under windows we must allow the full-copy now!
# Because windows does not support forking!
pool_full_copy = self._traj.v_full_copy
self._traj.v_full_copy = True
initializer = _configure_frozen_pool
target = _frozen_pool_single_run
else:
# We don't want to pickle the storage service
pool_service = self._traj.v_storage_service
self._traj.v_storage_service = None
init_kwargs = dict(logging_manager=self._logging_manager,
storage_service=pool_service,
niceness=self._niceness)
initializer = _configure_pool
target = _pool_single_run
try:
iterator = self._make_iterator(start_run_idx)
mpool = multip.Pool(self._ncores, initializer=initializer,
initargs=(init_kwargs,))
pool_results = mpool.imap(target, iterator)
# Signal start of progress calculation
self._show_progress(n - 1, total_runs)
for result in pool_results:
n = self._check_result_and_store_references(result, results,
n, total_runs)
# Everything is done
mpool.close()
mpool.join()
finally:
if self._freeze_input:
self._traj.v_full_copy = pool_full_copy
else:
self._traj.v_storage_service = pool_service
self._logger.info('Pool has joined, will delete it.')
del mpool
elif self._use_scoop:
self._logger.info('Starting SCOOP jobs')
if self._freeze_input:
self._logger.info('Freezing SCOOP input')
if hasattr(_frozen_scoop_single_run, 'kwargs'):
self._logger.warning('You already did run an experiment with '
'SCOOP and a frozen input. Frozen input '
'is realized as a shared constant, so '
'over time your memory might get bloated. '
'If you experience trouble, '
'restart your python interpreter and '
'SCOOP.')
_frozen_scoop_single_run.kwargs = {}
scoop_full_copy = self._traj.v_full_copy
self._traj.v_full_copy = True
init_kwargs = self._make_kwargs()
scoop_rev = self.name + '_' + str(time.time()).replace('.','_')
shared.setConst(**{scoop_rev: init_kwargs})
iterator = self._make_iterator(start_run_idx,
copy_data=True,
scoop_rev=scoop_rev)
target = _frozen_scoop_single_run
else:
iterator = self._make_iterator(start_run_idx,
copy_data=True)
target = _scoop_single_run
try:
if scoop.IS_RUNNING:
scoop_results = futures.map(target, iterator, timeout=self._timeout)
else:
self._logger.error('SCOOP is NOT running, I will use Python`s map '
'function. To activate scoop, start your script via '
'`python -m scoop your_script.py`.')
scoop_results = map(target, iterator)
# Signal start of progress calculation
self._show_progress(n - 1, total_runs)
for result in scoop_results:
n = self._check_result_and_store_references(result, results,
n, total_runs)
finally:
if self._freeze_input:
self._traj.v_full_copy = scoop_full_copy
else:
# If we spawn a single process for each run, we need an additional queue
# for the results of `runfunc`
if self._immediate_postproc:
maxsize = 0
else:
maxsize = total_runs
start_result_length = len(results)
result_queue = multip.Queue(maxsize=maxsize)
# Create a generator to generate the tasks for multiprocessing
iterator = self._make_iterator(start_run_idx, result_queue=result_queue)
self._logger.info('Starting multiprocessing with at most '
'%d processes running at the same time.' % self._ncores)
if self._check_usage:
self._logger.info(
'Monitoring usage statistics. I will not spawn new processes '
'if one of the following cap thresholds is crossed, '
'CPU: %.1f %%, RAM: %.1f %%, Swap: %.1f %%.' %
(self._cpu_cap, self._memory_cap[0], self._swap_cap))
keep_running = True # Evaluates to false if trajectory produces
# no more single runs
                process_dict = {}  # Dict containing all subprocesses
# For the cap values, we lazily evaluate them
cpu_usage_func = lambda: self._estimate_cpu_utilization()
memory_usage_func = lambda: self._estimate_memory_utilization(process_dict)
swap_usage_func = lambda: psutil.swap_memory().percent
signal_cap = True # If True cap warning is emitted
max_signals = 10 # Maximum number of warnings, after that warnings are
# no longer signaled
# Signal start of progress calculation
self._show_progress(n - 1, total_runs)
while len(process_dict) > 0 or keep_running:
# First check if some processes did finish their job
for pid in compat.listkeys(process_dict):
proc = process_dict[pid]
# Delete the terminated processes
if not proc.is_alive():
proc.join()
del process_dict[pid]
del proc
# Check if caps are reached.
# Cap is only checked if there is at least one
# process working to prevent deadlock.
no_cap = True
if self._check_usage and self._ncores > len(process_dict) > 0:
for cap_name, cap_function, threshold in (
('CPU Cap', cpu_usage_func, self._cpu_cap),
('Memory Cap', memory_usage_func, self._memory_cap[0]),
('Swap Cap', swap_usage_func, self._swap_cap)):
cap_value = cap_function()
if cap_value > threshold:
no_cap = False
if signal_cap:
if cap_name == 'Memory Cap':
add_on_str = ' [including estimate]'
else:
add_on_str = ''
self._logger.warning('Could not start next process '
'immediately [currently running '
'%d process(es)]. '
'%s reached, '
'%.1f%% >= %.1f%%%s.' %
(len(process_dict), cap_name,
cap_value, threshold,
add_on_str))
signal_cap = False
max_signals -= 1
if max_signals == 0:
self._logger.warning('Maximum number of cap warnings '
'reached. I will no longer '
'notify about cap violations, '
'but cap values are still applied '
'silently in background.')
break # If one cap value is reached we can skip the rest
# If we have less active processes than
# self._ncores and there is still
# a job to do, add another process
if len(process_dict) < self._ncores and keep_running and no_cap:
try:
task = next(iterator)
proc = multip.Process(target=_process_single_run,
args=(task,))
proc.start()
process_dict[proc.pid] = proc
signal_cap = max_signals > 0 # Only signal max_signals times
except StopIteration:
# All simulation runs have been started
keep_running = False
if self._postproc is not None and self._immediate_postproc:
if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
reference_service = self._traj._storage_service
self._traj.v_storage_service = self._storage_service
try:
self._logger.info('Performing IMMEDIATE POSTPROCESSING.')
keep_running, start_run_idx, new_runs = \
self._execute_postproc(results)
finally:
if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
self._traj._storage_service = reference_service
if keep_running:
expanded_by_postproc = True
self._logger.info('IMMEDIATE POSTPROCESSING expanded '
'the trajectory and added %d '
'new runs' % new_runs)
n = start_run_idx
total_runs = len(self._traj)
iterator = self._make_iterator(start_run_idx,
result_queue=result_queue)
if not keep_running:
self._logger.debug('All simulation runs have been started. '
'No new runs will be started. '
'The simulation will finish after the still '
'active runs completed.')
else:
time.sleep(0.001)
# Get all results from the result queue
n = self._get_results_from_queue(result_queue, results, n, total_runs)
# Finally get all results from the result queue once more and finalize the queue
self._get_results_from_queue(result_queue, results, n, total_runs)
result_queue.close()
result_queue.join_thread()
del result_queue
result_sort(results, start_result_length)
finally:
# Finalize the wrapper
if self._multiproc_wrapper is not None:
self._multiproc_wrapper.finalize()
self._multiproc_wrapper = None
return expanded_by_postproc
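# Hedged illustration (not part of pypet): the cap check above evaluates the
# usage functions lazily (as lambdas) so that psutil is only queried when a new
# process could actually be spawned. A minimal standalone sketch of the same
# pattern, with purely illustrative threshold values:
def _example_lazy_cap_check(cpu_cap=100.0, swap_cap=100.0):
    """Return True if no resource cap is exceeded (illustration only)."""
    import psutil  # already a pypet dependency, used in the loop above
    checks = (('CPU Cap', lambda: psutil.cpu_percent(interval=0.1), cpu_cap),
              ('Swap Cap', lambda: psutil.swap_memory().percent, swap_cap))
    for cap_name, cap_function, threshold in checks:
        if cap_function() > threshold:  # the lambda is only evaluated here
            return False
    return True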
@prefix_naming
class MultiprocContext(HasLogger):
""" A lightweight environment that allows the usage of multiprocessing.
Can be used if you don't want a full-blown :class:`~pypet.environment.Environment` to
enable multiprocessing or if you want to implement your own custom multiprocessing.
This Wrapper tool will take a trajectory container and take care that the storage
service is multiprocessing safe. Supports the ``'LOCK'`` as well as the ``'QUEUE'`` mode.
In case of the latter an extra queue process is created if desired.
This process will handle all storage requests and write data to the hdf5 file.
    Note that in case of ``'QUEUE'`` wrapping data can only be stored, not loaded, because
the queue will only be read in one direction.
:param trajectory:
The trajectory which storage service should be wrapped
:param wrap_mode:
There are four options:
:const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`: ('QUEUE')
If desired another process for storing the trajectory is spawned.
The sub processes running the individual trajectories will add their results to a
multiprocessing queue that is handled by an additional process.
Note that this requires additional memory since data
will be pickled and send over the queue for storage!
:const:`~pypet.pypetconstants.WRAP_MODE_LOCK`: ('LOCK')
            Each individual process takes care of storage by itself. Before
            carrying out the storage, a lock is placed to prevent the other processes
            from storing data. Accordingly, sometimes this leads to a lot of processes
            waiting until the lock is released.
Yet, data does not need to be pickled before storage!
        :const:`~pypet.pypetconstants.WRAP_MODE_PIPE`: ('PIPE')
Experimental mode based on a single pipe. Is faster than ``'QUEUE'`` wrapping
but data corruption may occur, does not work under Windows
(since it relies on forking).
        :const:`~pypet.pypetconstants.WRAP_MODE_LOCAL`: ('LOCAL')
            Data is not stored in spawned child processes, but data needs to be
            returned manually in terms of *references* dictionaries (the ``reference`` property
            of the ``ReferenceWrapper`` class).
Storing is only performed in the main process.
            Note that removing data during a single run no longer has an effect on memory
whatsoever, because there are references kept for all data
that is supposed to be stored.
:param full_copy:
In case the trajectory gets pickled (sending over a queue or a pool of processors)
if the full trajectory should be copied each time (i.e. all parameter points) or
only a particular point. A particular point can be chosen beforehand with
:func:`~pypet.trajectory.Trajectory.f_set_crun`.
Leave ``full_copy=None`` if the setting from the passed trajectory should be used.
Otherwise ``v_full_copy`` of the trajectory is changed to your chosen value.
:param manager:
You can pass an optional multiprocessing manager here,
if you already have instantiated one.
Leave ``None`` if you want the wrapper to create one.
:param use_manager:
If your lock and queue should be created with a manager or if wrapping should be
created from the multiprocessing module directly.
For example: ``multiprocessing.Lock()`` or via a manager
``multiprocessing.Manager().Lock()``
(if you specified a manager, this manager will be used).
The former is usually faster whereas the latter is more flexible and can
be used in an environment where fork is not available, for instance.
:param lock:
You can pass a multiprocessing lock here, if you already have instantiated one.
Leave ``None`` if you want the wrapper to create one in case of ``'LOCK'`` wrapping.
:param queue:
You can pass a multiprocessing queue here, if you already instantiated one.
        Leave ``None`` if you want the wrapper to create one in case of ``'QUEUE'`` wrapping.
:param queue_maxsize:
Maximum size of queue if created new. 0 means infinite.
:param port:
Port to be used by lock server in case of ``'NETLOCK'`` wrapping.
Can be a single integer as well as a tuple ``(7777, 9999)`` to specify
a range of ports from which to pick a random one.
Leave `None` for using pyzmq's default range.
In case automatic determining of the host's ip address fails,
you can also pass the full address (including the protocol and
the port) of the host in the network like ``'tcp://127.0.0.1:7777'``.
:param timeout:
Timeout for a NETLOCK wrapping in seconds. After ``timeout``
seconds a lock is automatically released and free for other
processes.
:param gc_interval:
Interval (in runs or storage operations) with which ``gc.collect()``
should be called in case of the ``'LOCAL'``, ``'QUEUE'``, or ``'PIPE'`` wrapping.
Leave ``None`` for never.
``1`` means after every storing, ``2`` after every second storing, and so on.
Only calls ``gc.collect()`` in the main (if ``'LOCAL'`` wrapping)
or the queue/pipe process. If you need to garbage collect data within your single runs,
you need to manually call ``gc.collect()``.
Usually, there is no need to set this parameter since the Python garbage collection
works quite nicely and schedules collection automatically.
:param log_config:
Path to logging config file or dictionary to configure logging for the
spawned queue process. Thus, only considered if the queue wrap mode is chosen.
:param log_stdout:
If stdout of the queue process should also be logged.
:param graceful_exit:
Hitting Ctrl+C won't kill a server process unless hit twice.
    For a usage example see :ref:`example-16`.
"""
def __init__(self, trajectory,
wrap_mode=pypetconstants.WRAP_MODE_LOCK,
full_copy=None,
manager=None,
use_manager=True,
lock=None,
queue=None,
queue_maxsize=0,
port=None,
timeout=None,
gc_interval=None,
log_config=None,
log_stdout=False,
graceful_exit=False):
self._set_logger()
self._manager = manager
self._traj = trajectory
self._storage_service = self._traj.v_storage_service
self._queue_process = None
self._pipe_process = None
self._lock_wrapper = None
self._queue_wrapper = None
self._reference_wrapper = None
self._wrap_mode = wrap_mode
self._queue = queue
self._queue_maxsize = queue_maxsize
self._pipe = queue
self._max_buffer_size = queue_maxsize
self._lock = lock
self._lock_process = None
self._port = port
self._timeout = timeout
self._use_manager = use_manager
self._logging_manager = None
self._gc_interval = gc_interval
self._graceful_exit = graceful_exit
if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE or
self._wrap_mode == pypetconstants.WRAP_MODE_PIPE or
self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK or
self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE):
self._logging_manager = LoggingManager(log_config=log_config,
log_stdout=log_stdout)
self._logging_manager.extract_replacements(self._traj)
self._logging_manager.check_log_config()
if full_copy is not None:
            self._traj.v_full_copy = full_copy
@property
def lock(self):
return self._lock
@property
def queue(self):
return self._queue
@property
def pipe(self):
return self._pipe
@property
def queue_wrapper(self):
return self._queue_wrapper
@property
def reference_wrapper(self):
return self._reference_wrapper
@property
def lock_wrapper(self):
return self._lock_wrapper
@property
def pipe_wrapper(self):
return self._pipe_wrapper
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.finalize()
def store_references(self, references):
"""In case of reference wrapping, stores data.
:param references: References dictionary from a ReferenceWrapper.
        Garbage collection is handled internally according to the ``gc_interval``
        setting passed to the constructor.
"""
self._reference_store.store_references(references)
def start(self):
"""Starts the multiprocess wrapping.
Automatically called when used as context manager.
"""
self._do_wrap()
def _do_wrap(self):
""" Wraps a Storage Service """
# First take care that the storage is initialised
self._traj.f_store(only_init=True)
if self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE:
self._prepare_queue()
elif self._wrap_mode == pypetconstants.WRAP_MODE_LOCK:
self._prepare_lock()
elif self._wrap_mode == pypetconstants.WRAP_MODE_PIPE:
self._prepare_pipe()
elif self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:
self._prepare_local()
elif self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK:
self._prepare_netlock()
elif self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE:
self._prepare_netqueue()
else:
            raise RuntimeError('The multiprocessing mode %s is not supported, '
                               'use `%s`, `%s`, `%s`, `%s`, `%s`, or `%s`.'
                               % (self._wrap_mode, pypetconstants.WRAP_MODE_QUEUE,
                                  pypetconstants.WRAP_MODE_LOCK,
                                  pypetconstants.WRAP_MODE_PIPE,
                                  pypetconstants.WRAP_MODE_LOCAL,
                                  pypetconstants.WRAP_MODE_NETLOCK,
                                  pypetconstants.WRAP_MODE_NETQUEUE))
def _prepare_local(self):
reference_wrapper = ReferenceWrapper()
self._traj.v_storage_service = reference_wrapper
self._reference_wrapper = reference_wrapper
self._reference_store = ReferenceStore(self._storage_service, self._gc_interval)
def _prepare_netlock(self):
""" Replaces the trajectory's service with a LockWrapper """
if not isinstance(self._port, compat.base_type):
url = port_to_tcp(self._port)
self._logger.info('Determined Server URL: `%s`' % url)
else:
url = self._port
if self._lock is None:
if hasattr(os, 'fork'):
self._lock = ForkAwareLockerClient(url)
else:
self._lock = LockerClient(url)
if self._timeout is None:
lock_server = LockerServer(url)
else:
lock_server = TimeOutLockerServer(url, self._timeout)
self._logger.info('Using timeout aware lock server.')
self._lock_process = multip.Process(name='LockServer', target=_wrap_handling,
args=(dict(handler=lock_server,
logging_manager=self._logging_manager,
graceful_exit=self._graceful_exit),))
# self._lock_process = threading.Thread(name='LockServer', target=_wrap_handling,
# args=(dict(handler=lock_server,
# logging_manager=self._logging_manager),))
self._lock_process.start()
self._lock.start()
# Wrap around the storage service to allow the placement of locks around
# the storage procedure.
lock_wrapper = LockWrapper(self._storage_service, self._lock)
self._traj.v_storage_service = lock_wrapper
self._lock_wrapper = lock_wrapper
def _prepare_lock(self):
""" Replaces the trajectory's service with a LockWrapper """
if self._lock is None:
if self._use_manager:
if self._manager is None:
self._manager = multip.Manager()
# We need a lock that is shared by all processes.
self._lock = self._manager.Lock()
else:
self._lock = multip.Lock()
# Wrap around the storage service to allow the placement of locks around
# the storage procedure.
lock_wrapper = LockWrapper(self._storage_service, self._lock)
self._traj.v_storage_service = lock_wrapper
self._lock_wrapper = lock_wrapper
def _prepare_pipe(self):
""" Replaces the trajectory's service with a queue sender and starts the queue process.
"""
if self._pipe is None:
self._pipe = multip.Pipe(True)
if self._lock is None:
self._lock = multip.Lock()
self._logger.info('Starting the Storage Pipe!')
# Wrap a queue writer around the storage service
pipe_handler = PipeStorageServiceWriter(self._storage_service, self._pipe[0],
max_buffer_size=self._max_buffer_size)
# Start the queue process
self._pipe_process = multip.Process(name='PipeProcess', target=_wrap_handling,
args=(dict(handler=pipe_handler,
logging_manager=self._logging_manager,
graceful_exit=self._graceful_exit),))
self._pipe_process.start()
# Replace the storage service of the trajectory by a sender.
# The sender will put all data onto the pipe.
# The writer from above will receive the data from
# the pipe and hand it over to
# the storage service
self._pipe_wrapper = PipeStorageServiceSender(self._pipe[1], self._lock)
self._traj.v_storage_service = self._pipe_wrapper
def _prepare_queue(self):
""" Replaces the trajectory's service with a queue sender and starts the queue process.
"""
if self._queue is None:
if self._use_manager:
if self._manager is None:
self._manager = multip.Manager()
self._queue = self._manager.Queue(maxsize=self._queue_maxsize)
else:
self._queue = multip.Queue(maxsize=self._queue_maxsize)
self._logger.info('Starting the Storage Queue!')
# Wrap a queue writer around the storage service
queue_handler = QueueStorageServiceWriter(self._storage_service, self._queue,
self._gc_interval)
# Start the queue process
self._queue_process = multip.Process(name='QueueProcess', target=_wrap_handling,
args=(dict(handler=queue_handler,
logging_manager=self._logging_manager,
graceful_exit=self._graceful_exit),))
self._queue_process.start()
# Replace the storage service of the trajectory by a sender.
# The sender will put all data onto the queue.
# The writer from above will receive the data from
# the queue and hand it over to
# the storage service
self._queue_wrapper = QueueStorageServiceSender(self._queue)
self._traj.v_storage_service = self._queue_wrapper
def _prepare_netqueue(self):
""" Replaces the trajectory's service with a queue sender and starts the queue process.
"""
self._logger.info('Starting Network Queue!')
if not isinstance(self._port, compat.base_type):
url = port_to_tcp(self._port)
self._logger.info('Determined Server URL: `%s`' % url)
else:
url = self._port
if self._queue is None:
if hasattr(os, 'fork'):
self._queue = ForkAwareQueuingClient(url)
else:
self._queue = QueuingClient(url)
# Wrap a queue writer around the storage service
queuing_server_handler = QueuingServer(url,
self._storage_service,
self._queue_maxsize,
self._gc_interval)
# Start the queue process
self._queue_process = multip.Process(name='QueuingServerProcess', target=_wrap_handling,
args=(dict(handler=queuing_server_handler,
logging_manager=self._logging_manager,
graceful_exit=self._graceful_exit),))
self._queue_process.start()
self._queue.start()
# Replace the storage service of the trajectory by a sender.
# The sender will put all data onto the queue.
# The writer from above will receive the data from
# the queue and hand it over to
# the storage service
self._queue_wrapper = QueueStorageServiceSender(self._queue)
self._traj.v_storage_service = self._queue_wrapper
def finalize(self):
""" Restores the original storage service.
If a queue process and a manager were used both are shut down.
Automatically called when used as context manager.
"""
if (self._wrap_mode == pypetconstants.WRAP_MODE_QUEUE and
self._queue_process is not None):
self._logger.info('The Storage Queue will no longer accept new data. '
'Hang in there for a little while. '
'There still might be some data in the queue that '
'needs to be stored.')
# We might have passed the queue implicitly,
# to be sure we add the queue here again
self._traj.v_storage_service.queue = self._queue
self._traj.v_storage_service.send_done()
self._queue_process.join()
if hasattr(self._queue, 'join'):
self._queue.join()
if hasattr(self._queue, 'close'):
self._queue.close()
if hasattr(self._queue, 'join_thread'):
self._queue.join_thread()
self._logger.info('The Storage Queue has joined.')
elif (self._wrap_mode == pypetconstants.WRAP_MODE_PIPE and
self._pipe_process is not None):
self._logger.info('The Storage Pipe will no longer accept new data. '
'Hang in there for a little while. '
'There still might be some data in the pipe that '
'needs to be stored.')
self._traj.v_storage_service.conn = self._pipe[1]
self._traj.v_storage_service.send_done()
self._pipe_process.join()
self._pipe[1].close()
self._pipe[0].close()
elif (self._wrap_mode == pypetconstants.WRAP_MODE_NETLOCK and
self._lock_process is not None):
self._lock.send_done()
self._lock.finalize()
self._lock_process.join()
elif (self._wrap_mode == pypetconstants.WRAP_MODE_NETQUEUE and
self._queue_process is not None):
self._queue.send_done()
self._queue.finalize()
self._queue_process.join()
if self._manager is not None:
self._manager.shutdown()
self._manager = None
self._queue_process = None
self._queue = None
self._queue_wrapper = None
self._lock = None
self._lock_wrapper = None
self._lock_process = None
self._reference_wrapper = None
self._pipe = None
self._pipe_process = None
self._pipe_wrapper = None
self._logging_manager = None
self._traj._storage_service = self._storage_service
def __del__(self):
self.finalize()
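def _example_lock_wrapping(traj):
    """Hedged usage sketch (illustration only, not part of pypet's API).
    ``traj`` is assumed to be a pypet Trajectory with a storage service
    attached, created elsewhere.
    """
    with MultiprocContext(traj, wrap_mode=pypetconstants.WRAP_MODE_LOCK):
        # Inside the context, storing through ``traj.v_storage_service`` is
        # protected by a shared multiprocessing lock, so worker processes
        # spawned here can safely write to the same HDF5 file.
        pass
    # Leaving the context calls ``finalize()`` and restores the original
    # storage service.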
| bsd-3-clause |
salma1601/process-asl | procasl/externals/nistats/first_level_model.py | 2 | 21160 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This module presents an interface to use the glm implemented in
nistats.regression.
It contains the GLM and contrast classes that are meant to be the main objects
of fMRI data analyses.
"""
from warnings import warn
import time
import sys
import numpy as np
from nibabel import Nifti1Image
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.externals.joblib import Memory
from nilearn._utils.niimg_conversions import check_niimg
from nilearn._utils import CacheMixin
from nilearn.input_data import NiftiMasker
from sklearn.externals.joblib import Parallel, delayed
from .regression import OLSModel, ARModel, SimpleRegressionResults
from .design_matrix import make_design_matrix
from .contrasts import _fixed_effect_contrast
from .utils import _basestring, _check_run_tables
def mean_scaling(Y, axis=0):
"""Scaling of the data to have percent of baseline change along the
specified axis
Parameters
----------
Y : array of shape (n_time_points, n_voxels)
The input data.
Returns
-------
Y : array of shape (n_time_points, n_voxels),
The data after mean-scaling, de-meaning and multiplication by 100.
mean : array of shape (n_voxels,)
The data mean.
"""
mean = Y.mean(axis=axis)
if (mean == 0).any():
        warn('Mean values of 0 observed. '
             'The data have probably been centered. '
             'Scaling might not work as expected.')
mean = np.maximum(mean, 1)
Y = 100 * (Y / mean - 1)
return Y, mean
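# Hedged illustration (not part of nistats): a tiny self-contained check of
# what mean_scaling computes, namely percent signal change around the
# temporal mean of each voxel.
def _example_mean_scaling():
    Y = np.array([[95.], [100.], [105.]])  # one voxel, three time points
    Y_scaled, mean = mean_scaling(Y)
    assert np.allclose(mean, [100.])
    assert np.allclose(Y_scaled, [[-5.], [0.], [5.]])  # percent change
    return Y_scaled, mean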
def _ar_model_fit(X, val, Y):
"""Wrapper for fit method of ARModel to allow parallelization with joblib"""
return ARModel(X, val).fit(Y)
def run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0):
""" GLM fit for an fMRI data matrix
Parameters
----------
Y : array of shape (n_time_points, n_voxels)
The fMRI data.
X : array of shape (n_time_points, n_regressors)
The design matrix.
noise_model : {'ar1', 'ols'}, optional
The temporal variance model. Defaults to 'ar1'.
bins : int, optional
Maximum number of discrete bins for the AR(1) coef histogram.
n_jobs : int, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : int, optional
        The verbosity level. Default is 0.
Returns
-------
labels : array of shape (n_voxels,),
A map of values on voxels used to identify the corresponding model.
results : dict,
Keys correspond to the different labels values
values are RegressionResults instances corresponding to the voxels.
"""
acceptable_noise_models = ['ar1', 'ols']
if noise_model not in acceptable_noise_models:
raise ValueError(
"Acceptable noise models are {0}. You provided 'noise_model={1}'".\
format(acceptable_noise_models, noise_model))
if Y.shape[0] != X.shape[0]:
raise ValueError(
'The number of rows of Y should match the number of rows of X.'
' You provided X with shape {0} and Y with shape {1}'.\
format(X.shape, Y.shape))
# Create the model
ols_result = OLSModel(X).fit(Y)
if noise_model == 'ar1':
# compute and discretize the AR1 coefs
ar1 = ((ols_result.resid[1:] * ols_result.resid[:-1]).sum(axis=0) /
(ols_result.resid ** 2).sum(axis=0))
del ols_result
ar1 = (ar1 * bins).astype(np.int) * 1. / bins
        # Fit the AR model according to current AR(1) estimates
results = {}
labels = ar1
# Parallelize by creating a job per ARModel
vals = np.unique(ar1)
ar_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_ar_model_fit)(X, val, Y[:, labels == val]) for val in vals)
for val, result in zip(vals, ar_result):
results[val] = result
del vals
del ar_result
else:
labels = np.zeros(Y.shape[1])
results = {0.0: ols_result}
return labels, results
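# Hedged illustration (not part of nistats): fitting run_glm on toy data with
# the 'ols' noise model, in which case all voxels share the single label 0.0.
def _example_run_glm():
    rng = np.random.RandomState(42)
    n_scans, n_voxels = 20, 5
    # two random regressors plus an intercept column
    X = np.hstack([rng.randn(n_scans, 2), np.ones((n_scans, 1))])
    Y = X.dot(rng.randn(3, n_voxels)) + 0.1 * rng.randn(n_scans, n_voxels)
    labels, results = run_glm(Y, X, noise_model='ols')
    assert set(labels) == {0.0}
    # results[0.0] is the fitted OLS result for all voxels; its ``theta``
    # attribute is assumed to hold the estimated coefficients, as in
    # nistats' regression results.
    return results[0.0].theta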
class FirstLevelModel(BaseEstimator, TransformerMixin, CacheMixin):
""" Implementation of the General Linear Model for single session fMRI data
Parameters
----------
t_r: float
This parameter indicates repetition times of the experimental runs.
In seconds. It is necessary to correctly consider times in the design
matrix. This parameter is also passed to nilearn.signal.clean.
Please see the related documentation for details.
slice_time_ref: float, optional (default 0.)
This parameter indicates the time of the reference slice used in the
slice timing preprocessing step of the experimental runs. It is
expressed as a percentage of the t_r (time repetition), so it can have
values between 0. and 1.
hrf_model : string, optional
This parameter specifies the hemodynamic response function (HRF) for
the design matrices. It can be 'canonical', 'canonical with derivative'
or 'fir'.
drift_model : string, optional
This parameter specifies the desired drift model for the design
matrices. It can be 'polynomial', 'cosine' or 'blank'.
period_cut : float, optional
This parameter specifies the cut period of the low-pass filter in
seconds for the design matrices.
drift_order : int, optional
        This parameter specifies the order of the drift model (in case it is
polynomial) for the design matrices.
fir_delays : array of shape(n_onsets) or list, optional
In case of FIR design, yields the array of delays used in the FIR
model, in seconds.
min_onset : float, optional
This parameter specifies the minimal onset relative to the design
(in seconds). Events that start before (slice_time_ref * t_r +
min_onset) are not considered.
mask: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional,
Mask to be used on data. If an instance of masker is passed,
then its mask will be used. If no mask is given,
it will be computed automatically by a MultiNiftiMasker with default
parameters.
target_affine: 3x3 or 4x4 matrix, optional
This parameter is passed to nilearn.image.resample_img. Please see the
related documentation for details.
target_shape: 3-tuple of integers, optional
This parameter is passed to nilearn.image.resample_img. Please see the
related documentation for details.
smoothing_fwhm: float, optional
If smoothing_fwhm is not None, it gives the size in millimeters of the
spatial smoothing to apply to the signal.
memory: string, optional
Path to the directory used to cache the masking process and the glm
fit. By default, no caching is done. Creates instance of joblib.Memory.
memory_level: integer, optional
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
standardize : boolean, optional
If standardize is True, the time-series are centered and normed:
their variance is put to 1 in the time dimension.
signal_scaling: False, int or (int, int), optional,
If not False, fMRI signals are scaled to the mean value of scaling_axis
given, which can be 0, 1 or (0, 1). 0 refers to mean scaling each voxel
with respect to time, 1 refers to mean scaling each time point with
respect to all voxels and (0, 1) refers to scaling with respect to
voxels and time, which is known as grand mean scaling.
Incompatible with standardize (standardize=False is enforced when
signal_scaling is not False).
noise_model : {'ar1', 'ols'}, optional
The temporal variance model. Defaults to 'ar1'
verbose : integer, optional
        Indicate the level of verbosity. Defaults to 1; progress is printed
        whenever the value is greater than 0.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs', -2 'all CPUs but one', and so on.
minimize_memory : boolean, optional
Gets rid of some variables on the model fit results that are not
necessary for contrast computation and would only be useful for
further inspection of model details. This has an important impact
on memory consumption. True by default.
Attributes
----------
labels : array of shape (n_voxels,),
a map of values on voxels used to identify the corresponding model
results : dict,
with keys corresponding to the different labels values
values are RegressionResults instances corresponding to the voxels
"""
def __init__(self, t_r=None, slice_time_ref=0., hrf_model='glover',
drift_model='cosine', period_cut=128, drift_order=1,
fir_delays=[0], min_onset=-24, mask=None, target_affine=None,
target_shape=None, smoothing_fwhm=None, memory=Memory(None),
memory_level=1, standardize=False, signal_scaling=0,
noise_model='ar1', verbose=1, n_jobs=1,
minimize_memory=True):
# design matrix parameters
self.t_r = t_r
self.slice_time_ref = slice_time_ref
self.hrf_model = hrf_model
self.drift_model = drift_model
self.period_cut = period_cut
self.drift_order = drift_order
self.fir_delays = fir_delays
self.min_onset = min_onset
# glm parameters
self.mask = mask
self.target_affine = target_affine
self.target_shape = target_shape
self.smoothing_fwhm = smoothing_fwhm
if isinstance(memory, _basestring):
self.memory = Memory(memory)
else:
self.memory = memory
self.memory_level = memory_level
self.standardize = standardize
if signal_scaling in [0, 1, (0, 1)]:
self.scaling_axis = signal_scaling
self.signal_scaling = True
self.standardize = False
elif signal_scaling is False:
self.signal_scaling = signal_scaling
else:
raise ValueError('signal_scaling must be "False", "0", "1"'
' or "(0, 1)"')
self.noise_model = noise_model
self.verbose = verbose
self.n_jobs = n_jobs
self.minimize_memory = minimize_memory
# attributes
self.labels_ = None
self.results_ = None
def fit(self, run_imgs, paradigms=None, confounds=None,
design_matrices=None):
""" Fit the GLM
For each run:
1. create design matrix X
2. do a masker job: fMRI_data -> Y
3. fit regression to (Y, X)
Parameters
----------
run_imgs: Niimg-like object or list of Niimg-like objects,
See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
Data on which the GLM will be fitted. If this is a list,
the affine is considered the same for all.
paradigms: pandas Dataframe or string or list of pandas DataFrames or
strings,
fMRI paradigms used to build design matrices. One paradigm expected
per run_img. Ignored in case designs is not None.
confounds: pandas Dataframe or string or list of pandas DataFrames or
strings,
Each column in a DataFrame corresponds to a confound variable
to be included in the regression model of the respective run_img.
The number of rows must match the number of volumes in the
respective run_img. Ignored in case designs is not None.
design_matrices: pandas DataFrame or list of pandas DataFrames,
Design matrices that will be used to fit the GLM.
"""
# Check arguments
# Check imgs type
if not isinstance(run_imgs, (list, tuple)):
run_imgs = [run_imgs]
for rimg in run_imgs:
if not isinstance(rimg, (_basestring, Nifti1Image)):
raise ValueError('run_imgs must be Niimg-like object or list'
' of Niimg-like objects')
# check all information necessary to build design matrices is available
if design_matrices is None:
if paradigms is None:
raise ValueError('paradigms or design matrices must be provided')
if self.t_r is None:
raise ValueError('t_r not given to FirstLevelModel object'
' to compute design from paradigm')
else:
design_matrices = _check_run_tables(run_imgs, design_matrices,
'design_matrices')
# check the number of paradigm and confound files match number of runs
# Also check paradigm and confound files can be loaded as DataFrame
if paradigms is not None:
paradigms = _check_run_tables(run_imgs, paradigms, 'paradigms')
if confounds is not None:
confounds = _check_run_tables(run_imgs, confounds, 'confounds')
# Learn the mask
if not isinstance(self.mask, NiftiMasker):
self.masker_ = NiftiMasker(
mask_img=self.mask, smoothing_fwhm=self.smoothing_fwhm,
target_affine=self.target_affine,
standardize=self.standardize, mask_strategy='epi',
t_r=self.t_r, memory=self.memory,
verbose=max(0, self.verbose - 1),
target_shape=self.target_shape,
memory_level=self.memory_level)
else:
self.masker_ = clone(self.mask)
for param_name in ['target_affine', 'target_shape',
'smoothing_fwhm', 'low_pass', 'high_pass',
't_r', 'memory', 'memory_level']:
our_param = getattr(self, param_name)
if our_param is None:
continue
if getattr(self.masker_, param_name) is not None:
                    warn('Parameter %s of the masker overridden' % param_name)
setattr(self.masker_, param_name, our_param)
self.masker_.fit(run_imgs[0])
# For each run fit the model and keep only the regression results.
self.labels_, self.results_, self.design_matrices_ = [], [], []
n_runs = len(run_imgs)
t0 = time.time()
for run_idx, run_img in enumerate(run_imgs):
# Report progress
if self.verbose > 0:
percent = float(run_idx) / n_runs
percent = round(percent * 100, 2)
dt = time.time() - t0
# We use a max to avoid a division by zero
if run_idx == 0:
remaining = 'go take a coffee, a big one'
else:
remaining = (100. - percent) / max(0.01, percent) * dt
remaining = '%i seconds remaining' % remaining
sys.stderr.write(" " * 100 + "\r")
sys.stderr.write(
"Computing run %d out of %d runs (%s)\r"
% (run_idx, n_runs, remaining))
# Build the experimental design for the glm
run_img = check_niimg(run_img, ensure_ndim=4)
if design_matrices is None:
n_scans = run_img.get_data().shape[3]
if confounds is not None:
confounds_matrix = confounds[run_idx].values
if confounds_matrix.shape[0] != n_scans:
                        raise ValueError('Rows in confounds do not match '
                                         'n_scans in run_img at index %d'
                                         % (run_idx,))
confounds_names = confounds[run_idx].columns
else:
confounds_matrix = None
confounds_names = None
start_time = self.slice_time_ref * self.t_r
end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r
frame_times = np.linspace(start_time, end_time, n_scans)
design = make_design_matrix(frame_times, paradigms[run_idx],
self.hrf_model, self.drift_model,
self.period_cut, self.drift_order,
self.fir_delays, confounds_matrix,
confounds_names, self.min_onset)
else:
design = design_matrices[run_idx]
self.design_matrices_.append(design)
# Compute GLM
Y = self.masker_.transform(run_img)
if self.signal_scaling:
Y, _ = mean_scaling(Y, self.scaling_axis)
if self.memory is not None:
mem_glm = self.memory.cache(run_glm)
else:
mem_glm = run_glm
labels, results = mem_glm(Y, design,
noise_model=self.noise_model,
bins=100, n_jobs=self.n_jobs)
self.labels_.append(labels)
# We save memory if inspecting model details is not necessary
if self.minimize_memory:
for key in results:
results[key] = SimpleRegressionResults(results[key])
self.results_.append(results)
del Y
# Report progress
if self.verbose > 0:
sys.stderr.write("\nComputation of %d runs done in %i seconds\n"
% (n_runs, time.time() - t0))
return self
def compute_contrast(self, contrast_def, contrast_name=None,
stat_type=None, output_type='z_score'):
"""Generate different outputs corresponding to
the contrasts provided e.g. z_map, t_map, effects and variance.
In multi-session case, outputs the fixed effects map.
Parameters
----------
contrast_def : array or list of arrays of shape (n_col) or (n_run, n_col)
where ``n_col`` is the number of columns of the design matrix,
(one array per run). If only one array is provided when there
are several runs, it will be assumed that the same contrast is
desired for all runs
contrast_name : str, optional
name of the contrast
stat_type : {'t', 'F'}, optional
type of the contrast
output_type : str, optional
Type of the output map. Can be 'z_score', 'stat', 'p_value',
'effect_size' or 'effect_variance'
Returns
-------
output_image : Nifti1Image
The desired output image
"""
if self.labels_ is None or self.results_ is None:
raise ValueError('The model has not been fit yet')
if isinstance(contrast_def, np.ndarray):
con_vals = [contrast_def]
elif isinstance(contrast_def, (list, tuple)):
con_vals = contrast_def
for cidx, con in enumerate(contrast_def):
if not isinstance(con, np.ndarray):
raise ValueError('contrast_def at index %i is not an'
' array' % cidx)
else:
raise ValueError('contrast_def must be an array or list of arrays')
n_runs = len(self.labels_)
if len(con_vals) != n_runs:
warn('One contrast given, assuming it for all %d runs' % n_runs)
con_vals = con_vals * n_runs
if isinstance(output_type, _basestring):
if output_type not in ['z_score', 'stat', 'p_value', 'effect_size',
'effect_variance']:
                raise ValueError('output_type must be one of "z_score", "stat",'
                                 ' "p_value", "effect_size" or "effect_variance"')
else:
            raise ValueError('output_type must be one of "z_score", "stat",'
                             ' "p_value", "effect_size" or "effect_variance"')
if self.memory is not None:
arg_ignore = ['labels', 'results']
mem_contrast = self.memory.cache(_fixed_effect_contrast,
ignore=arg_ignore)
else:
mem_contrast = _fixed_effect_contrast
contrast = mem_contrast(self.labels_, self.results_, con_vals,
stat_type)
estimate_ = getattr(contrast, output_type)()
# Prepare the returned images
output = self.masker_.inverse_transform(estimate_)
if contrast_name is None:
contrast_name = str(con_vals)
output.get_header()['descrip'] = (
'%s of contrast %s' % (output_type, contrast_name))
return output
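# Hedged usage sketch (illustration only): a single-run analysis with the
# class above. ``run_img``, ``paradigm`` and ``contrast_vector`` are assumed
# to exist elsewhere and are not shipped with this module.
def _example_first_level(run_img, paradigm, contrast_vector):
    """Fit a one-run GLM and return a z-map (t_r=2.0 is an assumption)."""
    model = FirstLevelModel(t_r=2.0, noise_model='ar1')
    model.fit(run_img, paradigms=paradigm)
    # contrast_vector must be a numpy array with one weight per column of
    # the run's design matrix
    return model.compute_contrast(contrast_vector, output_type='z_score')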
| bsd-3-clause |
viisar/brew | brew/selection/dynamic/base.py | 3 | 1144 | from sklearn.neighbors.classification import KNeighborsClassifier
from abc import abstractmethod
class DCS(object):
@abstractmethod
def select(self, ensemble, x):
pass
def __init__(self, Xval, yval, K=5, weighted=False, knn=None):
self.Xval = Xval
self.yval = yval
self.K = K
if knn is None:
self.knn = KNeighborsClassifier(n_neighbors=K, algorithm='brute')
else:
self.knn = knn
self.knn.fit(Xval, yval)
self.weighted = weighted
def get_neighbors(self, x, return_distance=False):
# obtain the K nearest neighbors of test sample in the validation set
if not return_distance:
[idx] = self.knn.kneighbors(x,
return_distance=return_distance)
else:
rd = return_distance
[dists], [idx] = self.knn.kneighbors(x, return_distance=rd)
X_nn = self.Xval[idx] # k neighbors
y_nn = self.yval[idx] # k neighbors target
if return_distance:
return X_nn, y_nn, dists
else:
return X_nn, y_nn
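# Hedged illustration (not part of brew): a minimal concrete DCS subclass with
# a trivial ``select`` rule, used only to exercise ``get_neighbors`` on toy data.
class _ExampleDCS(DCS):
    def select(self, ensemble, x):
        return ensemble  # trivial rule: keep the whole ensemble
def _example_get_neighbors():
    import numpy as np
    rng = np.random.RandomState(0)
    Xval = rng.rand(20, 2)
    yval = rng.randint(0, 2, size=20)
    selector = _ExampleDCS(Xval, yval, K=3)
    X_nn, y_nn = selector.get_neighbors(Xval[:1])
    return X_nn.shape, y_nn.shape  # ((3, 2), (3,))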
| mit |
luo66/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
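# Hedged illustration (not part of scikit-learn): using a named threshold-based
# scorer. 'roc_auc' resolves to a _ThresholdScorer, which feeds the estimator's
# decision_function output to roc_auc_score.
def _example_roc_auc_scorer():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=50, random_state=0)
    clf = LogisticRegression().fit(X, y)
    return get_scorer('roc_auc')(clf, X, y)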
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 34 | 50761 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
        # np.unique sorts in ascending order; the largest class id is the positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
        # coef needs to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
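    # Editor's note: a hedged illustration (not part of the original module).
    # After _fit_multiclass, coef_ holds one weight row per class; prediction
    # (inherited from LinearClassifierMixin) then amounts to picking the class
    # whose one-vs-all decision score is largest, roughly:
    #
    #   >>> import numpy as np
    #   >>> scores = np.array([[0.2, -1.3, 0.9]])  # decision_function(x) for 3 classes
    #   >>> scores.argmax(axis=1)                  # index into self.classes_
    #   array([2])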
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
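    # Editor's note: a hedged usage sketch (not part of the original module)
    # of incremental learning with partial_fit. The classes argument must list
    # every label on the first call, because later minibatches may not contain
    # all classes:
    #
    #   >>> import numpy as np
    #   >>> from sklearn.linear_model import SGDClassifier
    #   >>> clf = SGDClassifier(loss="log")
    #   >>> X1, y1 = np.array([[0., 0.], [1., 1.]]), np.array([0, 1])
    #   >>> clf = clf.partial_fit(X1, y1, classes=np.array([0, 1]))
    #   >>> clf = clf.partial_fit(np.array([[2., 2.]]), np.array([1]))  # classes omitted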
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
        case is in Appendix B of:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
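# Editor's note: a hedged usage sketch (not part of the original module).
# Probability estimates are only exposed for loss='log' or loss='modified_huber';
# with the default hinge loss, accessing predict_proba raises an AttributeError:
#
#   >>> import numpy as np
#   >>> from sklearn.linear_model import SGDClassifier
#   >>> X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
#   >>> y = np.array([1, 1, 2, 2])
#   >>> clf = SGDClassifier(loss="log").fit(X, y)
#   >>> clf.predict_proba([[0.8, 1.]]).shape  # one row per sample, one column per class
#   (1, 2)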
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
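    # Editor's note: a hedged usage sketch (not part of the original module) of
    # out-of-core regression: each partial_fit call makes a single pass
    # (n_iter=1) over the chunk of data it receives:
    #
    #   >>> import numpy as np
    #   >>> from sklearn.linear_model import SGDRegressor
    #   >>> reg = SGDRegressor()
    #   >>> for _ in range(5):
    #   ...     X_chunk = np.random.randn(20, 3)
    #   ...     y_chunk = X_chunk.dot(np.array([1., -2., 0.5]))
    #   ...     reg = reg.partial_fit(X_chunk, y_chunk)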
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
        The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
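# Editor's note: a hedged sketch (not part of the original module) of the
# ``average`` option documented above: with average=10, plain SGD updates run
# until 10 samples have been seen, after which the averaged weights are stored
# in coef_ / intercept_ (the un-averaged ones remain in standard_coef_ /
# standard_intercept_):
#
#   >>> import numpy as np
#   >>> from sklearn.linear_model import SGDRegressor
#   >>> X = np.random.randn(100, 4)
#   >>> y = X.dot(np.array([1., 2., 3., 4.]))
#   >>> reg = SGDRegressor(average=10).fit(X, y)
#   >>> reg.coef_.shape, reg.average_coef_.shape
#   ((4,), (4,))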
| bsd-3-clause |